author     Lijo Lazar <lijo.lazar@amd.com>           2026-01-19 12:19:40 +0530
committer  Alex Deucher <alexander.deucher@amd.com>  2026-01-29 12:26:48 -0500
commit     0d9a49a2ce4738bb802cfc0c9ca5abdecd36e96e
tree       14a573eb10c90b256f77d9de1277661f7d0c1cfa
parent     156c0ab1de4e917b106deb63cd7583fa45d7ce29
drm/amd/pm: Initialize allowed feature list
Instead of returning a bit mask of allowed features, initialize the allowed features in the callback implementation itself.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c                |  17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h            |  24
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c        |  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c          |  92
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  |  89
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c       |  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c     |  35
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c     |  12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c     |  80
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c     |  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h              |   2

11 files changed, 158 insertions(+), 226 deletions(-)
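The shape of the change, reduced to a minimal sketch (illustrative only; "foo" is a placeholder ASIC name, not a file in this patch, and the usual swsmu headers are assumed): the old callback filled a caller-supplied two-element uint32_t array that amdgpu_smu.c then copied into the allowed-features bitmap, while the new callback writes the SMU_FEATURE_LIST_ALLOWED bitmap directly through the smu_feature wrappers.

/* Illustrative sketch only -- "foo" stands in for any per-ASIC ppt file below. */

/* Old style: report a raw mask and let the core copy it into the bitmap. */
static int foo_get_allowed_feature_mask(struct smu_context *smu,
					uint32_t *feature_mask, uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);

	return 0;
}

/* New style: initialize the allowed list in place, no intermediate array. */
static int foo_init_allowed_features(struct smu_context *smu)
{
	smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);

	return 0;
}

static const struct pptable_funcs foo_ppt_funcs = {
	.init_allowed_features = foo_init_allowed_features,
	/* ... */
};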
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b635792f5906..75897ac203c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -691,11 +691,8 @@ static int smu_sys_set_pp_table(void *handle,
return ret;
}
-static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
+static int smu_init_driver_allowed_feature_mask(struct smu_context *smu)
{
- uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
- int ret = 0;
-
/*
* With SCPM enabled, the allowed featuremasks setting(via
* PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
@@ -710,15 +707,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
- ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
- SMU_FEATURE_MAX/32);
- if (ret)
- return ret;
-
- smu_feature_list_add_bits(smu, SMU_FEATURE_LIST_ALLOWED,
- (unsigned long *)allowed_feature_mask);
-
- return ret;
+ return smu_init_allowed_features(smu);
}
static int smu_set_funcs(struct amdgpu_device *adev)
@@ -1949,7 +1938,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
if (!smu->pm_enabled)
return 0;
- ret = smu_get_driver_allowed_feature_mask(smu);
+ ret = smu_init_driver_allowed_feature_mask(smu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2290298579e1..7c63c631f6d4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -812,11 +812,10 @@ struct pptable_funcs {
int (*run_btc)(struct smu_context *smu);
/**
- * @get_allowed_feature_mask: Get allowed feature mask.
- * &feature_mask: Array to store feature mask.
- * &num: Elements in &feature_mask.
+ * @init_allowed_features: Initialize allowed features bitmap.
+ * Directly sets allowed features using smu_feature wrapper functions.
*/
- int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ int (*init_allowed_features)(struct smu_context *smu);
/**
* @get_current_power_state: Get the current power state.
@@ -2052,14 +2051,6 @@ static inline void smu_feature_bits_copy(struct smu_feature_bits *dst,
bitmap_copy(dst->bits, src, nbits);
}
-static inline void smu_feature_bits_or(struct smu_feature_bits *dst,
- const struct smu_feature_bits *src1,
- const unsigned long *src2,
- unsigned int nbits)
-{
- bitmap_or(dst->bits, src1->bits, src2, nbits);
-}
-
static inline struct smu_feature_bits *
__smu_feature_get_list(struct smu_context *smu, enum smu_feature_list list)
{
@@ -2128,15 +2119,6 @@ static inline void smu_feature_list_set_bits(struct smu_context *smu,
smu->smu_feature.feature_num);
}
-static inline void smu_feature_list_add_bits(struct smu_context *smu,
- enum smu_feature_list list,
- const unsigned long *src)
-{
- struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);
-
- smu_feature_bits_or(bits, bits, src, smu->smu_feature.feature_num);
-}
-
static inline void smu_feature_list_to_arr32(struct smu_context *smu,
enum smu_feature_list list,
uint32_t *arr)
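The set_all/clear_all/set_bit/clear_bit wrappers that the new callbacks rely on are not shown in this hunk; judging from the helpers that are visible above (__smu_feature_get_list() plus the kernel bitmap API), they presumably reduce to something like the sketch below. Treat the exact definitions as an assumption -- only the removed helpers are part of this patch.

/* Assumed shape of the wrappers used by the new callbacks (not part of this patch). */
static inline void smu_feature_list_set_all(struct smu_context *smu,
					    enum smu_feature_list list)
{
	struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);

	bitmap_fill(bits->bits, smu->smu_feature.feature_num);
}

static inline void smu_feature_list_clear_all(struct smu_context *smu,
					      enum smu_feature_list list)
{
	struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);

	bitmap_zero(bits->bits, smu->smu_feature.feature_num);
}

static inline void smu_feature_list_set_bit(struct smu_context *smu,
					    enum smu_feature_list list,
					    unsigned int feature)
{
	struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);

	set_bit(feature, bits->bits);
}

static inline void smu_feature_list_clear_bit(struct smu_context *smu,
					      enum smu_feature_list list,
					      unsigned int feature)
{
	struct smu_feature_bits *bits = __smu_feature_get_list(smu, list);

	clear_bit(feature, bits->bits);
}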
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 7c5ce6a6e2ca..b22a0e91826d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -345,14 +345,9 @@ static int arcturus_init_smc_tables(struct smu_context *smu)
}
static int
-arcturus_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+arcturus_init_allowed_features(struct smu_context *smu)
{
- if (num > 2)
- return -EINVAL;
-
- /* pptable will handle the features to enable */
- memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
@@ -1877,7 +1872,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
- .get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
+ .init_allowed_features = arcturus_init_allowed_features,
/* btc */
.run_btc = arcturus_run_btc,
/* dpm/clk tables */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 1f84654bbc85..f14eed052526 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -275,89 +275,83 @@ static bool is_asic_secure(struct smu_context *smu)
}
static int
-navi10_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+navi10_init_allowed_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
- | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
- | FEATURE_MASK(FEATURE_PPT_BIT)
- | FEATURE_MASK(FEATURE_TDC_BIT)
- | FEATURE_MASK(FEATURE_GFX_EDC_BIT)
- | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
- | FEATURE_MASK(FEATURE_VR0HOT_BIT)
- | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
- | FEATURE_MASK(FEATURE_THERMAL_BIT)
- | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
- | FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT)
- | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT)
- | FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT);
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
+
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_PREFETCHER_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_MP0CLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_RSMU_SMN_CG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_PPT_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_TDC_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_EDC_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_APCC_PLUS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VR0HOT_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FAN_CONTROL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_THERMAL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_LED_DISPLAY_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_LCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_DCEFCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_DSTATE_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_BACO_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_SS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_APCC_DFLL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_CTF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_OUT_OF_BAND_MONITOR_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_TEMP_DEPENDENT_VMIN_BIT);
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFXCLK_BIT);
if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_LINK_BIT);
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_ULV_BIT);
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_GFXCLK_BIT);
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFXOFF_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MMHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ATHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VCN_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_JPEG_PG_BIT);
if (smu->dc_controlled_by_gpio)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ACDC_BIT);
if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_SOCCLK_BIT);
- /* DPM UCLK enablement should be skipped for navi10 A0 secure board */
if (!(is_asic_secure(smu) &&
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0)) &&
- (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
- | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
- | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
+ (adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_UCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MEM_VDDCI_SCALING_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MEM_MVDD_SCALING_BIT);
+ }
- /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
if (is_asic_secure(smu) &&
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0))
- *(uint64_t *)feature_mask &=
- ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_SOCCLK_BIT);
return 0;
}
@@ -3277,7 +3271,7 @@ static int navi10_set_config_table(struct smu_context *smu,
}
static const struct pptable_funcs navi10_ppt_funcs = {
- .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
+ .init_allowed_features = navi10_init_allowed_features,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index f930ba2733e9..98a02fc08214 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -276,85 +276,82 @@ static const uint8_t sienna_cichlid_throttler_map[] = {
};
static int
-sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+sienna_cichlid_init_allowed_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_FCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_FCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_UCLK_BIT)
- | FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
- | FEATURE_MASK(FEATURE_DF_CSTATE_BIT)
- | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
- | FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_VR0HOT_BIT)
- | FEATURE_MASK(FEATURE_PPT_BIT)
- | FEATURE_MASK(FEATURE_TDC_BIT)
- | FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT)
- | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
- | FEATURE_MASK(FEATURE_THERMAL_BIT)
- | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
+
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_PREFETCHER_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_FCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_MP0CLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_DCEFCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_FCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_UCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_DSTATE_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DF_CSTATE_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_RSMU_SMN_CG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_SS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VR0HOT_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_PPT_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_TDC_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_BACO_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_APCC_DFLL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_CTF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FAN_CONTROL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_THERMAL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_OUT_OF_BAND_MONITOR_BIT);
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_GPO_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFX_GPO_BIT);
}
if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
(amdgpu_ip_version(adev, MP1_HWIP, 0) > IP_VERSION(11, 0, 7)) &&
!(adev->flags & AMD_IS_APU))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_DCS_BIT);
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
- | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
- | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
+ if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_UCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MEM_VDDCI_SCALING_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MEM_MVDD_SCALING_BIT);
+ }
if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_LINK_BIT);
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_SOCCLK_BIT);
if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_ULV_BIT);
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_GFXCLK_BIT);
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFXOFF_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ATHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MMHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN ||
smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MM_DPM_PG_BIT);
if (smu->dc_controlled_by_gpio)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ACDC_BIT);
if (amdgpu_device_should_use_aspm(adev))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_LCLK_BIT);
return 0;
}
@@ -3085,7 +3082,7 @@ out:
}
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
- .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
+ .init_allowed_features = sienna_cichlid_init_allowed_features,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
.dpm_set_vcn_enable = sienna_cichlid_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = sienna_cichlid_dpm_set_jpeg_enable,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 9de0b676bb7b..3b6a34644a92 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -329,14 +329,9 @@ static int aldebaran_init_smc_tables(struct smu_context *smu)
return smu_v13_0_init_smc_tables(smu);
}
-static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+static int aldebaran_init_allowed_features(struct smu_context *smu)
{
- if (num > 2)
- return -EINVAL;
-
- /* pptable will handle the features to enable */
- memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
@@ -1967,7 +1962,7 @@ static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
static const struct pptable_funcs aldebaran_ppt_funcs = {
/* init dpm */
- .get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
+ .init_allowed_features = aldebaran_init_allowed_features,
/* dpm/clk tables */
.set_default_dpm_table = aldebaran_set_default_dpm_table,
.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index ce52b616b935..9c4298736b28 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -287,49 +287,44 @@ static const uint8_t smu_v13_0_0_throttler_map[] = {
};
static int
-smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+smu_v13_0_0_init_allowed_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0xff, sizeof(uint32_t) * num);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFXCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_IMU_BIT);
}
if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
!(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ATHUB_MMHUB_PG_BIT);
if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_SOCCLK_BIT);
- /* PMFW 78.58 contains a critical fix for gfxoff feature */
if ((smu->smc_fw_version < 0x004e3a00) ||
!(adev->pm.pp_feature & PP_GFXOFF_MASK))
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFXOFF_BIT);
if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_UCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VMEMP_SCALING_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VDDIO_MEM_SCALING_BIT);
}
if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_GFXCLK_BIT);
if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_LINK_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_LCLK_BIT);
}
if (!(adev->pm.pp_feature & PP_ULV_MASK))
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ smu_feature_list_clear_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_ULV_BIT);
return 0;
}
@@ -3130,7 +3125,7 @@ static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
}
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
- .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
+ .init_allowed_features = smu_v13_0_0_init_allowed_features,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
.i2c_init = smu_v13_0_0_i2c_control_init,
.i2c_fini = smu_v13_0_0_i2c_control_fini,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 1e82c43c851a..bd893e95515f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -742,15 +742,9 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
return smu_v13_0_fini_smc_tables(smu);
}
-static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
+static int smu_v13_0_6_init_allowed_features(struct smu_context *smu)
{
- if (num > 2)
- return -EINVAL;
-
- /* pptable will handle the features to enable */
- memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
@@ -3836,7 +3830,7 @@ static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
- .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
+ .init_allowed_features = smu_v13_0_6_init_allowed_features,
/* dpm/clk tables */
.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 0375e8484b2a..415766dbfe6c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -265,71 +265,67 @@ static const uint8_t smu_v13_0_7_throttler_map[] = {
};
static int
-smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+smu_v13_0_7_init_allowed_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
+ smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_DATA_READ_BIT);
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_IMU_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
}
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFXOFF_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_UCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_FCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VMEMP_SCALING_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_SOCCLK_BIT);
if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_LINK_BIT);
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_GFXCLK_BIT);
if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_ULV_BIT);
+
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_LCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_MP0CLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MM_DPM_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_VCN_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_FCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DF_CSTATE_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_THROTTLERS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_VR0HOT_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_CTF_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FAN_CONTROL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DS_SOCCLK_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_MEM_TEMP_READ_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_FW_DSTATE_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_SOC_MPCLK_DS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_BACO_MPCLK_DS_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_GFX_PCC_DFLL_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_SOC_CG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_BACO_BIT);
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_DPM_DCN_BIT);
if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+ smu_feature_list_set_bit(smu, SMU_FEATURE_LIST_ALLOWED, FEATURE_ATHUB_MMHUB_PG_BIT);
return 0;
}
@@ -2736,7 +2732,7 @@ static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
- .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
+ .init_allowed_features = smu_v13_0_7_init_allowed_features,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
.is_dpm_running = smu_v13_0_7_is_dpm_running,
.init_microcode = smu_v13_0_init_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index d2aa5fabfca4..3c351ee41e68 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -264,14 +264,9 @@ static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
};
-static int
-smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
+static int smu_v14_0_2_init_allowed_features(struct smu_context *smu)
{
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0xff, sizeof(uint32_t) * num);
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
@@ -2757,7 +2752,7 @@ static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
}
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
- .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
+ .init_allowed_features = smu_v14_0_2_init_allowed_features,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
.i2c_init = smu_v14_0_2_i2c_control_init,
.i2c_fini = smu_v14_0_2_i2c_control_fini,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 0f7778410a3a..24848da90234 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -70,7 +70,7 @@
#define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
#define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu)
#define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) smu_ppt_funcs(get_allowed_feature_mask, 0, smu, feature_mask, num)
+#define smu_init_allowed_features(smu) smu_ppt_funcs(init_allowed_features, 0, smu)
#define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
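The smu_ppt_funcs() dispatch macro itself is unchanged by this patch; it presumably keeps its usual form of calling the per-ASIC hook when one is registered and otherwise returning the supplied default, roughly as sketched here. That is what lets smu_init_driver_allowed_feature_mask() in amdgpu_smu.c call smu_init_allowed_features(smu) unconditionally.

/* Assumed shape of the dispatcher (not part of this patch): call the hook if
 * the ASIC provides one, otherwise fall back to the given default value.
 */
#define smu_ppt_funcs(intf, ret, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
			     (smu)->ppt_funcs->intf(smu, ##args) : ret) : -EINVAL)

With a default of 0, an ASIC that does not implement init_allowed_features simply leaves the allowed list in the state set by smu_feature_list_clear_all() in smu_init_driver_allowed_feature_mask().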