summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/amd/pm
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c59
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c15
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c123
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c83
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h295
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h427
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h100
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h57
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c44
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c118
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c60
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c212
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c2272
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h313
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h5
48 files changed, 4043 insertions, 615 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index eca93a9d0b84..62b0b1ef0d10 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -33,6 +33,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include <asm/processor.h>
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
@@ -680,6 +681,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - minimum(not available for Vega20 and Navi1x) and maximum memory
* clock labeled OD_MCLK
*
+ * - minimum and maximum fabric clock labeled OD_FCLK (SMU13)
+ *
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve. This is
* available for Vega20 and NV1X.
@@ -715,10 +718,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - First select manual using power_dpm_force_performance_level
*
* - For clock frequency setting, enter a new value by writing a
- * string that contains "s/m index clock" to the file. The index
+ * string that contains "s/m/f index clock" to the file. The index
* should be 0 if to set minimum clock. And 1 if to set maximum
* clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
- * "m 1 800" will update maximum mclk to be 800Mhz. For core
+ * "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will
+ * update maximum fabric clock to be 1600Mhz. For core
* clocks on VanGogh, the string contains "p core index clock".
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800Mhz.
@@ -768,6 +772,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
+ else if (*buf == 'f')
+ type = PP_OD_EDIT_FCLK_TABLE;
else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
@@ -843,9 +849,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int size = 0;
int ret;
- enum pp_clock_type od_clocks[6] = {
+ enum pp_clock_type od_clocks[] = {
OD_SCLK,
OD_MCLK,
+ OD_FCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
@@ -857,10 +864,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (ret)
return ret;
- for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
- ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
- if (ret)
- break;
+ for (clk_index = 0 ; clk_index < ARRAY_SIZE(od_clocks) ; clk_index++) {
+ amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
}
if (size == 0)
@@ -1588,7 +1593,7 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
adev_to_drm(adev)->unique,
- atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+ str_enabled_disabled(atomic_read(&adev->throttling_logging_enabled)),
adev->throttling_logging_rs.interval / HZ + 1);
}
@@ -1910,8 +1915,6 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
-
*states = ATTR_STATE_SUPPORTED;
if (!amdgpu_dpm_is_overdrive_supported(adev)) {
@@ -1919,10 +1922,8 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
- if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0)) {
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0, 12.1.0 SRIOV/BM support */
+ if (amdgpu_is_multi_aid(adev)) {
if (amdgpu_sriov_multi_vf_mode(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2000,9 +2001,7 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 5, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0)))
+ amdgpu_is_multi_aid(adev)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2023,9 +2022,7 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 5, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0)))
+ amdgpu_is_multi_aid(adev)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2035,9 +2032,7 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
if (gc_ver == IP_VERSION(9, 4, 2) ||
- gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0))
+ amdgpu_is_multi_aid(adev))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2241,7 +2236,7 @@ static ssize_t amdgpu_show_npm_status(struct device *dev,
if (r)
return r;
- return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(npower));
}
/**
@@ -2651,6 +2646,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -3732,8 +3728,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0))) &&
+ amdgpu_is_multi_aid(adev)) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3815,18 +3810,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
- (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0))) &&
+ amdgpu_is_multi_aid(adev)) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
/* only APUs other than gc 9,4,3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) ||
- (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0))) &&
+ amdgpu_is_multi_aid(adev)) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
@@ -3855,9 +3846,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* hotspot temperature for gc 9,4,3*/
- if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4) ||
- gc_ver == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_is_multi_aid(adev)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 281a5e377aee..e1c509bfc390 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -65,7 +65,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
addr = smc_start_address;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
@@ -109,7 +109,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
}
done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}
@@ -252,7 +252,7 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
if (ucode_size & 3)
return -EINVAL;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
@@ -265,7 +265,7 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
ucode_size -= 4;
}
WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return 0;
}
@@ -276,11 +276,11 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
unsigned long flags;
int ret;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
*value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}
@@ -291,11 +291,11 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
unsigned long flags;
int ret;
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ spin_lock_irqsave(&adev->reg.smc.lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ spin_unlock_irqrestore(&adev->reg.smc.lock, flags);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 2b5ac21fee39..1d6e30269d56 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -104,6 +104,21 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
PP_GFXOFF_MASK);
hwmgr->pp_table_version = PP_TABLE_V0;
hwmgr->od_enabled = false;
+ switch (hwmgr->chip_id) {
+ case CHIP_BONAIRE:
+ /* R9 M380 in iMac 2015: SMU hangs when enabling MCLK DPM
+ * R7 260X cards with old MC ucode: MCLK DPM is unstable
+ */
+ if (adev->pdev->subsystem_vendor == 0x106B ||
+ adev->pdev->device == 0x6658) {
+ dev_info(adev->dev, "disabling MCLK DPM on quirky ASIC");
+ adev->pm.pp_feature &= ~PP_MCLK_DPM_MASK;
+ hwmgr->feature_mask &= ~PP_MCLK_DPM_MASK;
+ }
+ break;
+ default:
+ break;
+ }
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e38222877f7e..8c37aa452569 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -787,7 +787,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.vddc_dependency_on_mclk;
struct phm_cac_leakage_table *std_voltage_table =
hwmgr->dyn_state.cac_leakage_table;
- uint32_t i;
+ uint32_t i, clk;
PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
@@ -804,10 +804,12 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
data->dpm_table.sclk_table.count = 0;
for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ clk = min(allowed_vdd_sclk_table->entries[i].clk, data->sclk_cap);
+
if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
+ clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
+ clk;
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
@@ -2794,11 +2796,11 @@ static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
if (tmp)
return -EINVAL;
- tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
if (tmp)
return -EINVAL;
- tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_display_clock);
if (tmp)
return -EINVAL;
@@ -2883,8 +2885,8 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->dyn_state.vddc_dependency_on_display_clock);
+ hwmgr->dyn_state.vddc_dependency_on_display_clock = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -2955,6 +2957,70 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
return ret;
}
+static int smu7_init_voltage_dependency_on_display_clock_table(struct pp_hwmgr *hwmgr)
+{
+ struct phm_clock_voltage_dependency_table *table;
+
+ if (!amdgpu_device_ip_get_ip_block(hwmgr->adev, AMD_IP_BLOCK_TYPE_DCE))
+ return 0;
+
+ table = kzalloc(struct_size(table, entries, 4), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ if (hwmgr->chip_id >= CHIP_POLARIS10) {
+ table->entries[0].clk = 38918;
+ table->entries[1].clk = 45900;
+ table->entries[2].clk = 66700;
+ table->entries[3].clk = 113200;
+
+ table->entries[0].v = 700;
+ table->entries[1].v = 740;
+ table->entries[2].v = 800;
+ table->entries[3].v = 900;
+ } else {
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CZ) {
+ table->entries[0].clk = 35200;
+ table->entries[1].clk = 35200;
+ table->entries[2].clk = 46700;
+ table->entries[3].clk = 64300;
+ } else {
+ table->entries[0].clk = 0;
+ table->entries[1].clk = 35200;
+ table->entries[2].clk = 54000;
+ table->entries[3].clk = 62500;
+ }
+
+ table->entries[0].v = 0;
+ table->entries[1].v = 720;
+ table->entries[2].v = 810;
+ table->entries[3].v = 900;
+ }
+
+ table->count = 4;
+ hwmgr->dyn_state.vddc_dependency_on_display_clock = table;
+ return 0;
+}
+
+static void smu7_set_sclk_cap(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->sclk_cap = 0xffffffff;
+
+ if (hwmgr->od_enabled)
+ return;
+
+ /* R9 390X board: last sclk dpm level is unstable, use lower sclk */
+ if (adev->pdev->device == 0x67B0 &&
+ adev->pdev->subsystem_vendor == 0x1043)
+ data->sclk_cap = 104000; /* 1040 MHz */
+
+ if (data->sclk_cap != 0xffffffff)
+ dev_info(adev->dev, "sclk cap: %u kHz on quirky ASIC\n", data->sclk_cap * 10);
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -2966,6 +3032,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return -ENOMEM;
hwmgr->backend = data;
+ smu7_set_sclk_cap(hwmgr);
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
@@ -2983,6 +3050,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_get_elb_voltages(hwmgr);
}
+ result = smu7_init_voltage_dependency_on_display_clock_table(hwmgr);
+ if (result)
+ goto fail;
+
if (hwmgr->pp_table_version == PP_TABLE_V1) {
smu7_complete_dependency_tables(hwmgr);
smu7_set_private_data_based_on_pptable_v1(hwmgr);
@@ -2991,9 +3062,6 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_set_private_data_based_on_pptable_v0(hwmgr);
}
- /* Initalize Dynamic State Adjustment Rule Settings */
- result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
if (result)
goto fail;
@@ -3079,13 +3147,40 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
+static uint32_t smu7_lookup_vddc_from_dispclk(struct pp_hwmgr *hwmgr)
+{
+ const struct amd_pp_display_configuration *cfg = hwmgr->display_config;
+ const struct phm_clock_voltage_dependency_table *vddc_dep_on_dispclk =
+ hwmgr->dyn_state.vddc_dependency_on_display_clock;
+ uint32_t i;
+
+ if (!vddc_dep_on_dispclk || !vddc_dep_on_dispclk->count ||
+ !cfg || !cfg->num_display || !cfg->display_clk)
+ return 0;
+
+ /* Start from 1 because ClocksStateUltraLow should not be used according to DC. */
+ for (i = 1; i < vddc_dep_on_dispclk->count; ++i)
+ if (vddc_dep_on_dispclk->entries[i].clk >= cfg->display_clk)
+ return vddc_dep_on_dispclk->entries[i].v;
+
+ return vddc_dep_on_dispclk->entries[vddc_dep_on_dispclk->count - 1].v;
+}
+
+static void smu7_apply_minimum_dce_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ uint32_t req_vddc = smu7_lookup_vddc_from_dispclk(hwmgr);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VddC_Request,
+ req_vddc * VOLTAGE_SCALE,
+ NULL);
+}
+
static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->pp_table_version == PP_TABLE_V1)
- phm_apply_dal_min_voltage_request(hwmgr);
-/* TO DO for v0 iceland and Ci*/
+ smu7_apply_minimum_dce_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -3821,7 +3916,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
- performance_level->engine_clock = engine_clock;
+ performance_level->engine_clock = min(engine_clock, data->sclk_cap);
pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
index d9e8b386bd4d..66adabeab6a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
@@ -234,6 +234,7 @@ struct smu7_hwmgr {
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
+ uint32_t sclk_cap;
struct smu7_leakage_voltage vddc_leakage;
struct smu7_leakage_voltage vddci_leakage;
struct smu7_leakage_voltage vddcgfx_leakage;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 40ecaac6c604..30d83e18db40 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -484,52 +484,6 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
return 0;
}
-/**
- * phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings
- *
- * @hwmgr: the address of the powerplay hardware manager.
- */
-int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4);
-
- if (NULL == table_clk_vlt) {
- pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[0].v = 700;
- else
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[1].v = 740;
- else
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[2].v = 800;
- else
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- if (pptable_info != NULL)
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
uint32_t level = 0;
@@ -540,43 +494,6 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
return level;
}
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table || table->count <= 0
- || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
- || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request,
- req_volt,
- NULL);
- return;
- }
- }
- pr_err("DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index 83b3c9315143..d370bfd0764d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
@@ -87,9 +87,7 @@ extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_t
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk);
-extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index c661185753b4..3ae45eac0c5c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -631,6 +631,7 @@ struct phm_dynamic_state_info {
struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;
struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk;
struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk;
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_display_clock;
struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
struct phm_clock_array *valid_sclk_values;
struct phm_clock_array *valid_mclk_values;
@@ -772,7 +773,6 @@ struct pp_hwmgr {
const struct pp_smumgr_func *smumgr_funcs;
bool is_kicker;
- enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 62ebec1c6fe3..731355bdb9bc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -245,7 +245,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
case 0x67B8:
- case 0x66B0:
+ case 0x67B0:
smu_data->power_tune_defaults = &defaults_hawaii_xt;
break;
case 0x6640:
@@ -543,12 +543,11 @@ static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
if (ci_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
+ (uint32_t *)&smu_data->power_tune_table.TdcWaterfallCtl, SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
@@ -1217,7 +1216,7 @@ static int ci_populate_single_memory_level(
}
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
+ memory_level->EnabledForActivity = 0;
memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
memory_level->VoltageDownH = 0;
@@ -1322,6 +1321,14 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
return result;
}
+ if (data->mclk_dpm_key_disabled && dpm_table->mclk_table.count) {
+ /* Populate the table with the highest MCLK level when MCLK DPM is disabled */
+ for (i = 0; i < dpm_table->mclk_table.count - 1; i++) {
+ levels[i] = levels[dpm_table->mclk_table.count - 1];
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+ }
+
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
dev_id = adev->pdev->device;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b05c8bbdf2f3..8faf7de7aaa9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -47,6 +47,7 @@
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "smu_v15_0_0_ppt.h"
+#include "smu_v15_0_8_ppt.h"
#include "amd_pcie.h"
/*
@@ -628,6 +629,18 @@ int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type ms
return ret;
}
+int amdgpu_smu_ras_feature_is_enabled(struct amdgpu_device *adev,
+ enum smu_feature_mask mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->feature_is_enabled)
+ ret = smu->ppt_funcs->feature_is_enabled(smu, mask);
+
+ return ret;
+}
+
static int smu_sys_get_pp_table(void *handle,
char **table)
{
@@ -790,6 +803,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(15, 0, 0):
smu_v15_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(15, 0, 8):
+ smu_v15_0_8_set_ppt_funcs(smu);
+ smu->od_enabled = true;
+ break;
default:
return -EINVAL;
}
@@ -2953,6 +2970,7 @@ int smu_get_power_limit(void *handle,
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
+ case IP_VERSION(15, 0, 8):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL, NULL, NULL);
@@ -3044,6 +3062,8 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
clk_type = SMU_OD_MCLK; break;
+ case OD_FCLK:
+ clk_type = SMU_OD_FCLK; break;
case OD_VDDC_CURVE:
clk_type = SMU_OD_VDDC_CURVE; break;
case OD_RANGE:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a6303d093c50..126fc54cb511 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -389,6 +389,7 @@ struct smu_table_context {
void *metrics_table;
void *clocks_table;
void *watermarks_table;
+ struct mutex metrics_lock;
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
@@ -1997,6 +1998,8 @@ const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);
int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
uint32_t param, uint32_t *readarg);
+int amdgpu_smu_ras_feature_is_enabled(struct amdgpu_device *adev,
+ enum smu_feature_mask mask);
#endif
void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
@@ -2161,4 +2164,21 @@ static inline void smu_feature_init(struct smu_context *smu, int feature_num)
smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
}
+/*
+ * smu_safe_u16_nn - Make u16 safe by filtering negative overflow errors
+ * @val: Input u16 value, may contain invalid negative overflows
+ *
+ * Convert u16 to non-negative value. Cast to s16 to detect negative values
+ * caused by calculation errors. Return 0 for negative errors, return
+ * original value if valid.
+ *
+ * Return: Valid u16 value or 0
+ */
+static inline u16 smu_safe_u16_nn(u16 val)
+{
+ s16 tmp = (s16)val;
+
+ return tmp < 0 ? 0 : val;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h
new file mode 100644
index 000000000000..6993d866183d
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_DRIVER_IF_H
+#define SMU_15_0_8_DRIVER_IF_H
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0,
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ /* 50 Kbits/s not supported anymore! */
+ UNSUPPORTED_1,
+ /* 100 Kbits/s */
+ I2C_SPEED_STANDARD_100K,
+ /* 400 Kbits/s */
+ I2C_SPEED_FAST_400K,
+ /* 1 Mbits/s (in fast mode) */
+ I2C_SPEED_FAST_PLUS_1M,
+ /* 1 Mbits/s (in high speed mode) not supported anymore!*/
+ UNSUPPORTED_2,
+ /* 2.3 Mbits/s not supported anymore! */
+ UNSUPPORTED_3,
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+/* bit should be 0 for read, 1 for write */
+#define CMDCONFIG_READWRITE_BIT 2
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+/* 64 Bit register offsets for PPSMC_MSG_McaBankDumpDW, PPSMC_MSG_McaBankCeDumpDW messages
+ * eg to read MCA_BANK_OFFSET_SYND for CE index, call PPSMC_MSG_McaBankCeDumpDW twice,
+ * ((index << 16) + MCA_BANK_OFFSET_SYND*8) argument for 1st DWORD, and
+ * ((index << 16) + MCA_BANK_OFFSET_SYND*8 + 4) argument for 2nd DWORD */
+typedef enum {
+ MCA_BANK_OFFSET_CTL = 0,
+ MCA_BANK_OFFSET_STATUS = 1,
+ MCA_BANK_OFFSET_ADDR = 2,
+ MCA_BANK_OFFSET_MISC = 3,
+ MCA_BANK_OFFSET_IPID = 5,
+ MCA_BANK_OFFSET_SYND = 6,
+ MCA_BANK_OFFSET_MAX = 16,
+} MCA_BANK_OFFSET_e;
+
+/* Firmware MP1 AID MCA Error Codes stored in MCA_MP_MP1:MCMP1_SYNDT0 error information */
+typedef enum {
+ /* MMHUB */
+ CODE_DAGB0 = 0,
+ CODE_DAGB1 = 1,
+ CODE_DAGB2 = 2,
+ CODE_DAGB3 = 3,
+ CODE_DAGB4 = 4,
+ CODE_EA0 = 5,
+ CODE_EA1 = 6,
+ CODE_EA2 = 7,
+ CODE_EA3 = 8,
+ CODE_EA4 = 9,
+ CODE_UTCL2_ROUTER = 10,
+ CODE_VML2 = 11,
+ CODE_VML2_WALKER = 12,
+ CODE_MMCANE = 13,
+
+ /* VCN VCPU */
+ CODE_VIDD = 14,
+ CODE_VIDV = 15,
+ /* VCN JPEG */
+ CODE_JPEG0S = 16,
+ CODE_JPEG0D = 17,
+ CODE_JPEG1S = 18,
+ CODE_JPEG1D = 19,
+ CODE_JPEG2S = 20,
+ CODE_JPEG2D = 21,
+ CODE_JPEG3S = 22,
+ CODE_JPEG3D = 23,
+ CODE_JPEG4S = 24,
+ CODE_JPEG4D = 25,
+ CODE_JPEG5S = 26,
+ CODE_JPEG5D = 27,
+ CODE_JPEG6S = 28,
+ CODE_JPEG6D = 29,
+ CODE_JPEG7S = 30,
+ CODE_JPEG7D = 31,
+ /* VCN MMSCH */
+ CODE_MMSCHD = 32,
+
+ /* SDMA */
+ CODE_SDMA0 = 33,
+ CODE_SDMA1 = 34,
+ CODE_SDMA2 = 35,
+ CODE_SDMA3 = 36,
+
+ /* SOC */
+ CODE_HDP = 37,
+ CODE_ATHUB = 38,
+ CODE_IH = 39,
+ CODE_XHUB_POISON = 40,
+ CODE_SMN_SLVERR = 41,
+ CODE_WDT = 42,
+
+ CODE_UNKNOWN = 43,
+ CODE_DMA = 44,
+ CODE_COUNT = 45,
+} ERR_CODE_e;
+
+/* Firmware MP5 XCD MCA Error Codes stored in MCA_MP_MP5:MCMP5_SYNDT0 error information */
+typedef enum {
+ /* SH POISON FED */
+ SH_FED_CODE = 0,
+ /* GCEA Pin UE_ERR regs */
+ GCEA_CODE = 1,
+ SQ_CODE = 2,
+ LDS_CODE = 3,
+ GDS_CODE = 4,
+ SP0_CODE = 5,
+ SP1_CODE = 6,
+ TCC_CODE = 7,
+ TCA_CODE = 8,
+ TCX_CODE = 9,
+ CPC_CODE = 10,
+ CPF_CODE = 11,
+ CPG_CODE = 12,
+ SPI_CODE = 13,
+ RLC_CODE = 14,
+ /* GCEA Pin, UE_EDC regs */
+ SQC_CODE = 15,
+ TA_CODE = 16,
+ TD_CODE = 17,
+ TCP_CODE = 18,
+ TCI_CODE = 19,
+ /* GC Router */
+ GC_ROUTER_CODE = 20,
+ VML2_CODE = 21,
+ VML2_WALKER_CODE = 22,
+ ATCL2_CODE = 23,
+ GC_CANE_CODE = 24,
+
+ /* SOC error codes 41-43 are common with ERR_CODE_e */
+ MP5_CODE_SMN_SLVERR = CODE_SMN_SLVERR,
+ MP5_CODE_UNKNOWN = CODE_UNKNOWN,
+} GC_ERROR_CODE_e;
+
+/* SW I2C Command Table */
+typedef struct {
+ /* Return data for read. Data to send for write*/
+ uint8_t ReadWriteData;
+ /* Includes whether associated command should have a stop or restart command,
+ * and is a read or write */
+ uint8_t CmdConfig;
+} SwI2cCmd_t;
+
+/* SW I2C Request Table */
+typedef struct {
+ /* CKSVII2C0(0) or CKSVII2C1(1) */
+ uint8_t I2CcontrollerPort;
+ /* Use I2cSpeed_e to indicate speed to select */
+ uint8_t I2CSpeed;
+ /* Slave address of device */
+ uint8_t SlaveAddress;
+ /* Number of commands */
+ uint8_t NumCmds;
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t;
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+ uint32_t Spare[8];
+ /* SMU internal use */
+ uint32_t MmHubPadding[8];
+} SwI2cRequestExternal_t;
+
+typedef enum {
+ PPCLK_UCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+/* TODO confirm if this is used in MI300 PPSMC_MSG_SetUclkDpmMode */
+typedef enum {
+ UCLK_DPM_MODE_BANDWIDTH,
+ UCLK_DPM_MODE_LATENCY,
+} UCLK_DPM_MODE_e;
+
+typedef struct {
+ /* 2 AVFS.PSM chains */
+ uint16_t AvgPsmCount_Chain0[13];
+ uint16_t AvgPsmCount_Chain1[15];
+ uint16_t MinPsmCount_Chain0[13];
+ uint16_t MinPsmCount_Chain1[15];
+ float MaxTemperature;
+
+ /* For voltage conversions, these are the array indexes
+ * 0:SOCIO
+ * 1:065_UCIE
+ * 2:075_UCIE
+ * 3:11_GTA
+ * 4:075_GTA */
+ float MinPsmVoltage[5];
+ float AvgPsmVoltage[5];
+} AvfsDebugTableMid_t;
+
+typedef struct {
+ /* 7 AVFS.PSM chains - not including TRO */
+ uint16_t AvgPsmCount_Chain0[15];
+ uint16_t AvgPsmCount_Chain1[15];
+ uint16_t AvgPsmCount_Chain2[13];
+ uint16_t AvgPsmCount_Chain3[13];
+ uint16_t AvgPsmCount_Chain4[15];
+ uint16_t AvgPsmCount_Chain5[15];
+ uint16_t AvgPsmCount_Chain6[5];
+ uint16_t MinPsmCount_Chain0[15];
+ uint16_t MinPsmCount_Chain1[15];
+ uint16_t MinPsmCount_Chain2[13];
+ uint16_t MinPsmCount_Chain3[13];
+ uint16_t MinPsmCount_Chain4[15];
+ uint16_t MinPsmCount_Chain5[15];
+ uint16_t MinPsmCount_Chain6[5];
+ float MaxTemperature;
+
+ /* For voltage conversions, these are the array indexes
+ * 0:VDDX */
+ float MinPsmVoltage;
+ float AvgPsmVoltage;
+} AvfsDebugTableAid_t;
+
+typedef struct {
+ /* 0-27 GFX, 28-29 SOC */
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableXcd_t;
+
+/* Defines used for IH-based thermal interrupts to GFX driver - A/X only */
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_VFFLR_INT 0xA
+
+/* thermal over-temp mask defines for IH interrupt to host */
+#define THROTTLER_PROCHOT_BIT 0
+#define THROTTLER_RESERVED 1
+/* AID, XCD, CCD throttling */
+#define THROTTLER_THERMAL_SOCKET_BIT 2
+/* VRHOT */
+#define THROTTLER_THERMAL_VR_BIT 3
+#define THROTTLER_THERMAL_HBM_BIT 4
+/* UEs are always reported, set flag to 0 to prevent clearing of UEs */
+#define ClearMcaOnRead_UE_FLAG_MASK 0x1
+/* Enable CE logging and clearing to driver */
+#define ClearMcaOnRead_CE_POLL_MASK 0x2
+/* AID MMHUB client IP CE Logging and clearing */
+#define ClearMcaOnRead_MMHUB_POLL_MASK 0x4
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h
new file mode 100644
index 000000000000..a3401c4cc20b
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_PMFW_H
+#define SMU_15_0_8_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+#define NUM_GFX_P2S_TABLES 8
+#define NUM_PSM_DIDT_THRESHOLDS 3
+#define NUM_XCD_XVMIN_VMIN_THRESHOLDS 3
+
+#define PRODUCT_MODEL_NUMBER_LEN 20
+#define PRODUCT_NAME_LEN 64
+#define PRODUCT_SERIAL_LEN 20
+#define PRODUCT_MANUFACTURER_NAME_LEN 32
+#define PRODUCT_FRU_ID_LEN 32
+
+//Feature ID list
+#define FEATURE_ID_DATA_CALCULATION 1
+#define FEATURE_ID_DPM_FCLK 2
+#define FEATURE_ID_DPM_GFXCLK 3
+#define FEATURE_ID_DPM_SPARE_4 4
+#define FEATURE_ID_DPM_SPARE_5 5
+#define FEATURE_ID_DPM_UCLK 6
+#define FEATURE_ID_DPM_SPARE_7 7
+#define FEATURE_ID_DPM_XGMI 8
+#define FEATURE_ID_DS_FCLK 9
+#define FEATURE_ID_DS_GFXCLK 10
+#define FEATURE_ID_DS_LCLK 11
+#define FEATURE_ID_DS_MP0CLK 12
+#define FEATURE_ID_DS_MP1CLK 13
+#define FEATURE_ID_DS_MPIOCLK 14
+#define FEATURE_ID_DS_SOCCLK 15
+#define FEATURE_ID_DS_VCN 16
+#define FEATURE_ID_PPT 17
+#define FEATURE_ID_TDC 18
+#define FEATURE_ID_THERMAL 19
+#define FEATURE_ID_SOC_PCC 20
+#define FEATURE_ID_PROCHOT 21
+#define FEATURE_ID_XVMIN0_VMIN_AID 22
+#define FEATURE_ID_XVMIN1_DD_AID 23
+#define FEATURE_ID_XVMIN0_VMIN_XCD 24
+#define FEATURE_ID_XVMIN1_DD_XCD 25
+#define FEATURE_ID_FW_CTF 26
+#define FEATURE_ID_MGCG 27
+#define FEATURE_ID_PSI7 28
+#define FEATURE_ID_XGMI_PER_LINK_PWR_DOWN 29
+#define FEATURE_ID_SOC_DC_RTC 30
+#define FEATURE_ID_GFX_DC_RTC 31
+#define FEATURE_ID_DVM_MIN_PSM 32
+#define FEATURE_ID_PRC 33
+#define FEATURE_ID_PSM_DIDT 34
+#define FEATURE_ID_PIT 35
+#define FEATURE_ID_DVO 36
+#define FEATURE_ID_XVMIN_CLKSTOP_DS 37
+#define FEATURE_ID_HBM_THROTTLE_CTRL 38
+#define FEATURE_ID_DPM_GL2CLK 39
+#define FEATURE_ID_GC_CAC_EDC 40
+#define FEATURE_ID_DS_DMABECLK 41
+#define FEATURE_ID_DS_MPIFOECLK 42
+#define FEATURE_ID_DS_MPRASCLK 43
+#define FEATURE_ID_DS_MPNHTCLK 44
+#define FEATURE_ID_DS_FIOCLK 45
+#define FEATURE_ID_DS_DXIOCLK 46
+#define FEATURE_ID_PCC 47
+#define FEATURE_ID_OCP 48
+#define FEATURE_ID_TRO 49
+#define FEATURE_ID_GL2_CAC_EDC 50
+#define FEATURE_ID_SPARE_51 51
+#define FEATURE_ID_GL2_CGCG 52
+#define FEATURE_ID_XCAC 53
+#define FEATURE_ID_DS_GL2CLK 54
+#define FEATURE_ID_FCS_VIN_PCC 55
+#define FEATURE_ID_FCS_VDDX_OCP_WARN 56
+#define FEATURE_ID_FCS_PWRBRK 57
+#define FEATURE_ID_DF_CSTATE 58
+#define FEATURE_ID_ARO 59
+#define FEATURE_ID_PS_PsPowerLimit 60
+#define FEATURE_ID_PS_PsPowerFloor 61
+#define FEATURE_ID_OCPWARNRC 62
+#define FEATURE_ID_XGMI_FOLDING 63
+#define FEATURE_ID_SMU_CG 64
+#define NUM_FEATURES 65
+
+//MGCG Feature ID List
+#define WAFL_CG 0
+#define SMU_FUSE_CG_DEEPSLEEP 1
+#define SMUIO_CG 2
+#define RSMU_MGCG 3
+#define SMU_CLK_MGCG 4
+#define MP5_CG 5
+#define UMC_CG 6
+#define WAFL0_CLK 7
+#define WAFL1_CLK 8
+#define VCN_MGCG 9
+#define GL2_MGCG 10
+#define MGCG_NUM_FEATURES 11
+
+/* enum for MPIO PCIe gen speed msgs */
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN6,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN6_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ GFX_GUARDBAND_OFFSET_0,
+ GFX_GUARDBAND_OFFSET_1,
+ GFX_GUARDBAND_OFFSET_2,
+ GFX_GUARDBAND_OFFSET_3,
+ GFX_GUARDBAND_OFFSET_4,
+ GFX_GUARDBAND_OFFSET_5,
+ GFX_GUARDBAND_OFFSET_6,
+ GFX_GUARDBAND_OFFSET_7,
+ GFX_GUARDBAND_OFFSET_COUNT
+} GFX_GUARDBAND_OFFSET_e;
+
+typedef enum {
+ GFX_DVM_MARGINHI_0,
+ GFX_DVM_MARGINHI_1,
+ GFX_DVM_MARGINHI_2,
+ GFX_DVM_MARGINHI_3,
+ GFX_DVM_MARGINHI_4,
+ GFX_DVM_MARGINHI_5,
+ GFX_DVM_MARGINHI_6,
+ GFX_DVM_MARGINHI_7,
+ GFX_DVM_MARGINLO_0,
+ GFX_DVM_MARGINLO_1,
+ GFX_DVM_MARGINLO_2,
+ GFX_DVM_MARGINLO_3,
+ GFX_DVM_MARGINLO_4,
+ GFX_DVM_MARGINLO_5,
+ GFX_DVM_MARGINLO_6,
+ GFX_DVM_MARGINLO_7,
+ GFX_DVM_MARGIN_COUNT
+} GFX_DVM_MARGIN_e;
+
+typedef enum{
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum{
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_PLANE_VDDCR_X0_TEMP,
+ SVI_PLANE_VDDCR_X1_TEMP,
+
+ SVI_PLANE_VDDIO_HBM_B_TEMP,
+ SVI_PLANE_VDDIO_HBM_D_TEMP,
+ SVI_PLANE_VDDIO_04_HBM_B_TEMP,
+ SVI_PLANE_VDDIO_04_HBM_D_TEMP,
+ SVI_PLANE_VDDCR_HBM_B_TEMP,
+ SVI_PLANE_VDDCR_HBM_D_TEMP,
+ SVI_PLANE_VDDCR_075_HBM_B_TEMP,
+ SVI_PLANE_VDDCR_075_HBM_D_TEMP,
+
+ SVI_PLANE_VDDIO_11_GTA_A_TEMP,
+ SVI_PLANE_VDDIO_11_GTA_C_TEMP,
+ SVI_PLANE_VDDAN_075_GTA_A_TEMP,
+ SVI_PLANE_VDDAN_075_GTA_C_TEMP,
+
+ SVI_PLANE_VDDCR_075_UCIE_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAA_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAM_A_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAM_C_TEMP,
+
+ SVI_PLANE_VDDCR_SOCIO_A_TEMP,
+ SVI_PLANE_VDDCR_SOCIO_C_TEMP,
+
+ SVI_PLANE_VDDAN_075_TEMP,
+ SVI_MAX_TEMP_ENTRIES, //21
+} SVI_TEMP_e;
+
+typedef enum{
+ SYSTEM_POWER_UBB_POWER,
+ SYSTEM_POWER_UBB_POWER_THRESHOLD,
+ SYSTEM_POWER_MAX_ENTRIES_WO_RESERVED,
+ SYSTEM_POWER_MAX_ENTRIES = 4
+} SYSTEM_POWER_e;
+
+#define SMU_METRICS_TABLE_VERSION 0xF
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t HbmTemperature[12];
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t HbmTemperatureAcc[12];
+ uint32_t MidTemperature[2];
+ uint32_t AidTemperature[2];
+ uint32_t XcdTemperature[8];
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency[2];
+ uint32_t UclkFrequency[2];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint32_t GfxclkFrequency[8];
+ uint32_t SocclkFrequency[2];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[2];
+
+ //XGMI:
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc;
+ uint64_t XgmiWriteBandwidthAcc;
+
+ //ACTIVITY:
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[2];
+
+ //THROTTLERS
+ uint64_t ProchotResidencyAcc;
+ uint64_t PptResidencyAcc;
+ uint64_t SocketThmResidencyAcc;
+ uint64_t VrThmResidencyAcc;
+ uint64_t HbmThmResidencyAcc;
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[2];
+ uint64_t PCIeL0ToRecoveryCountAcc;
+ uint64_t PCIenReplayAAcc;
+ uint64_t PCIenReplayARolloverCountAcc;
+ uint64_t PCIeNAKSentCountAcc;
+ uint64_t PCIeNAKReceivedCountAcc;
+ uint64_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[40];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //NVML-Parity: Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+} MetricsTable_t;
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
+
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; //Defaults to 0.
+ uint16_t NodeIdentifier;
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, SVI_MAX_TEMP_ENTRIES (21) entries, unused fields are set to 0xFFFF
+ int16_t spare[7];
+
+ //NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+
+ uint16_t SystemPower[SYSTEM_POWER_MAX_ENTRIES]; // UBB Current Power and Power Threshold
+} SystemMetricsTable_t;
+#pragma pack(pop)
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
+} VfMetricsTable_t;
+
+/* FRU product information */
+typedef struct __attribute__((aligned(4))) {
+ uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
+ uint8_t Name[PRODUCT_NAME_LEN];
+ uint8_t Serial[PRODUCT_SERIAL_LEN];
+ uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
+ uint8_t FruId[PRODUCT_FRU_ID_LEN];
+} FRUProductInfo_t;
+
+#define SMU_STATIC_METRICS_TABLE_VERSION 0x1
+
+#pragma pack(push, 4)
+typedef struct {
+ //FRU PRODUCT INFO
+ FRUProductInfo_t ProductInfo; //from i2c
+
+ //POWER
+ uint32_t MaxSocketPowerLimit;
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t MaxFclkFrequency;
+ uint32_t MinFclkFrequency;
+ uint32_t MaxGl2clkFrequency;
+ uint32_t MinGl2clkFrequency;
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+
+ //CTF limits
+ uint32_t CTFLimit_MID;
+ uint32_t CTFLimit_AID;
+ uint32_t CTFLimit_XCD;
+ uint32_t CTFLimit_HBM;
+
+ //Thermal Throttling limits
+ uint32_t ThermalLimit_MID;
+ uint32_t ThermalLimit_AID;
+ uint32_t ThermalLimit_XCD;
+ uint32_t ThermalLimit_HBM;
+
+ //PSNs
+ uint64_t PublicSerialNumber_MID[2];
+ uint64_t PublicSerialNumber_AID[2];
+ uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
+
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h
new file mode 100644
index 000000000000..7ffb445f4c0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_PPSMC_H
+#define SMU_15_0_8_PPSMC_H
+
+/* SMU Response Codes */
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+/* Message Definitions */
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_GetMetricsVersion 0x6
+#define PPSMC_MSG_GetMetricsTable 0x7
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x8
+#define PPSMC_MSG_SetDriverDramAddr 0x9 //ARG0: low address, ARG1: high address
+#define PPSMC_MSG_SetToolsDramAddr 0xA //ARG0: low address, ARG1: high address
+//#define PPSMC_MSG_SetSystemVirtualDramAddr 0xB
+#define PPSMC_MSG_SetSoftMaxByFreq 0xC
+#define PPSMC_MSG_SetPptLimit 0xD
+#define PPSMC_MSG_GetPptLimit 0xE
+#define PPSMC_MSG_DramLogSetDramAddr 0xF //ARG0: low address, ARG1: high address, ARG2: size
+#define PPSMC_MSG_HeavySBR 0x10
+#define PPSMC_MSG_DFCstateControl 0x11
+#define PPSMC_MSG_GfxDriverResetRecovery 0x12
+#define PPSMC_MSG_TriggerVFFLR 0x13
+#define PPSMC_MSG_SetSoftMinGfxClk 0x14
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x15
+#define PPSMC_MSG_PrepareForDriverUnload 0x16
+#define PPSMC_MSG_QueryValidMcaCount 0x17
+#define PPSMC_MSG_McaBankDumpDW 0x18
+#define PPSMC_MSG_ClearMcaOnRead 0x19
+#define PPSMC_MSG_QueryValidMcaCeCount 0x1A
+#define PPSMC_MSG_McaBankCeDumpDW 0x1B
+#define PPSMC_MSG_SelectPLPDMode 0x1C
+#define PPSMC_MSG_SetThrottlingPolicy 0x1D
+#define PPSMC_MSG_ResetSDMA 0x1E
+#define PPSMC_MSG_GetRasTableVersion 0x1F
+#define PPSMC_MSG_GetRmaStatus 0x20
+#define PPSMC_MSG_GetBadPageCount 0x21
+#define PPSMC_MSG_GetBadPageMcaAddress 0x22
+#define PPSMC_MSG_GetBadPagePaAddress 0x23
+#define PPSMC_MSG_SetTimestamp 0x24
+#define PPSMC_MSG_GetTimestamp 0x25
+#define PPSMC_MSG_GetRasPolicy 0x26
+#define PPSMC_MSG_GetBadPageIpIdLoHi 0x27
+#define PPSMC_MSG_EraseRasTable 0x28
+#define PPSMC_MSG_GetStaticMetricsTable 0x29
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x2A
+#define PPSMC_MSG_GetBadPageSeverity 0x2B
+#define PPSMC_MSG_GetSystemMetricsTable 0x2C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x2D
+#define PPSMC_MSG_ResetVCN 0x2E
+#define PPSMC_MSG_SetFastPptLimit 0x2F
+#define PPSMC_MSG_GetFastPptLimit 0x30
+#define PPSMC_MSG_SetSoftMinGl2clk 0x31
+#define PPSMC_MSG_SetSoftMaxGl2clk 0x32
+#define PPSMC_MSG_SetSoftMinFclk 0x33
+#define PPSMC_MSG_SetSoftMaxFclk 0x34
+#define PPSMC_Message_Count 0x35
+
+/* PSMC Reset Types for driver msg argument */
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+/* PLPD modes */
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 584c4cfd0c16..636ff90923d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -42,8 +42,10 @@
__SMU_DUMMY_MAP(SetPptLimit), \
__SMU_DUMMY_MAP(SetDriverDramAddrHigh), \
__SMU_DUMMY_MAP(SetDriverDramAddrLow), \
+ __SMU_DUMMY_MAP(SetDriverDramAddr), \
__SMU_DUMMY_MAP(SetToolsDramAddrHigh), \
__SMU_DUMMY_MAP(SetToolsDramAddrLow), \
+ __SMU_DUMMY_MAP(SetToolsDramAddr), \
__SMU_DUMMY_MAP(TransferTableSmu2Dram), \
__SMU_DUMMY_MAP(TransferTableDram2Smu), \
__SMU_DUMMY_MAP(UseDefaultPPTable), \
@@ -292,7 +294,12 @@
__SMU_DUMMY_MAP(AllowZstates), \
__SMU_DUMMY_MAP(GetSmartShiftStatus), \
__SMU_DUMMY_MAP(EnableLSdma), \
- __SMU_DUMMY_MAP(DisableLSdma),
+ __SMU_DUMMY_MAP(DisableLSdma), \
+ __SMU_DUMMY_MAP(InitializeGfx), \
+ __SMU_DUMMY_MAP(SetSoftMaxFclk), \
+ __SMU_DUMMY_MAP(SetSoftMaxGl2clk), \
+ __SMU_DUMMY_MAP(SetSoftMinGl2clk), \
+ __SMU_DUMMY_MAP(GetSystemMetricsVersion),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -324,6 +331,7 @@ enum smu_clk_type {
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
+ SMU_OD_FCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
SMU_OD_VDDGFX_OFFSET,
@@ -334,6 +342,7 @@ enum smu_clk_type {
SMU_OD_FAN_MINIMUM_PWM,
SMU_OD_FAN_ZERO_RPM_ENABLE,
SMU_OD_FAN_ZERO_RPM_STOP_TEMP,
+ SMU_GL2CLK,
SMU_CLK_COUNT,
};
@@ -472,6 +481,14 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
__SMU_DUMMY_MAP(FAN_ABNORMAL), \
__SMU_DUMMY_MAP(PIT), \
+ __SMU_DUMMY_MAP(DS_DMABECLK), \
+ __SMU_DUMMY_MAP(DS_MPIFOECLK), \
+ __SMU_DUMMY_MAP(DS_MPRASCLK), \
+ __SMU_DUMMY_MAP(DS_MPNHTCLK), \
+ __SMU_DUMMY_MAP(DS_FIOCLK), \
+ __SMU_DUMMY_MAP(DS_DXIOCLK), \
+ __SMU_DUMMY_MAP(DS_GL2CLK), \
+ __SMU_DUMMY_MAP(DPM_GL2CLK), \
__SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index 7c1701ed3e11..dd94e8a9e218 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -25,18 +25,6 @@
#include "amdgpu_smu.h"
-#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x17
-#define SMU11_DRIVER_IF_VERSION_NV10 0x37
-#define SMU11_DRIVER_IF_VERSION_NV12 0x38
-#define SMU11_DRIVER_IF_VERSION_NV14 0x38
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
-#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
-#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
-#define SMU11_DRIVER_IF_VERSION_Cyan_Skillfish 0x8
-
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -148,8 +136,6 @@ int smu_v11_0_setup_pptable(struct smu_context *smu);
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v11_0_check_fw_version(struct smu_context *smu);
-
int smu_v11_0_set_driver_table_location(struct smu_context *smu);
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index fd3937b08662..2346d9c6e162 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
@@ -35,8 +35,6 @@
int smu_v12_0_check_fw_status(struct smu_context *smu);
-int smu_v12_0_check_fw_version(struct smu_context *smu);
-
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index b0d6b7b0946d..89bbda0670ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -132,8 +132,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu);
int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v13_0_check_fw_version(struct smu_context *smu);
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu);
int smu_v13_0_set_tool_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index b453e6efc7c9..4eb40ff8aff2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -25,11 +25,6 @@
#include "amdgpu_smu.h"
-#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
-
#define FEATURE_MASK(feature) (1ULL << feature)
/* MP Apertures */
@@ -124,8 +119,6 @@ int smu_v14_0_setup_pptable(struct smu_context *smu);
int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v14_0_check_fw_version(struct smu_context *smu);
-
int smu_v14_0_set_driver_table_location(struct smu_context *smu);
int smu_v14_0_set_tool_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
index ab4a64f54e79..e6fd8be2cc4a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
@@ -41,7 +41,10 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
-#define MAX_DPM_LEVELS 16
+#define SMU15_DRIVER_IF_VERSION_SMU_V15_0_8 0x007D0000
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
#define MAX_PCIE_CONF 3
#define SMU15_TOOL_SIZE 0x19000
@@ -65,45 +68,28 @@ struct smu_15_0_max_sustainable_clocks {
uint32_t soc_clock;
};
-struct smu_15_0_dpm_clk_level {
- bool enabled;
- uint32_t value;
-};
-
-struct smu_15_0_dpm_table {
- uint32_t min; /* MHz */
- uint32_t max; /* MHz */
- uint32_t count;
- bool is_fine_grained;
- struct smu_15_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
-};
-
-struct smu_15_0_pcie_table {
- uint8_t pcie_gen[MAX_PCIE_CONF];
- uint8_t pcie_lane[MAX_PCIE_CONF];
- uint16_t clk_freq[MAX_PCIE_CONF];
- uint32_t num_of_link_levels;
-};
-
struct smu_15_0_dpm_tables {
- struct smu_15_0_dpm_table soc_table;
- struct smu_15_0_dpm_table gfx_table;
- struct smu_15_0_dpm_table uclk_table;
- struct smu_15_0_dpm_table eclk_table;
- struct smu_15_0_dpm_table vclk_table;
- struct smu_15_0_dpm_table dclk_table;
- struct smu_15_0_dpm_table dcef_table;
- struct smu_15_0_dpm_table pixel_table;
- struct smu_15_0_dpm_table display_table;
- struct smu_15_0_dpm_table phy_table;
- struct smu_15_0_dpm_table fclk_table;
- struct smu_15_0_pcie_table pcie_table;
+ struct smu_dpm_table soc_table;
+ struct smu_dpm_table gfx_table;
+ struct smu_dpm_table uclk_table;
+ struct smu_dpm_table eclk_table;
+ struct smu_dpm_table vclk_table;
+ struct smu_dpm_table dclk_table;
+ struct smu_dpm_table dcef_table;
+ struct smu_dpm_table pixel_table;
+ struct smu_dpm_table display_table;
+ struct smu_dpm_table phy_table;
+ struct smu_dpm_table fclk_table;
+ struct smu_pcie_table pcie_table;
+ struct smu_dpm_table gl2_table;
};
struct smu_15_0_dpm_context {
struct smu_15_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
+ uint32_t board_volt;
};
enum smu_15_0_power_state {
@@ -118,6 +104,7 @@ struct smu_15_0_power_context {
uint32_t power_source;
uint8_t in_power_limit_boost_mode;
enum smu_15_0_power_state power_state;
+ atomic_t throttle_status;
};
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
@@ -142,8 +129,6 @@ int smu_v15_0_setup_pptable(struct smu_context *smu);
int smu_v15_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v15_0_check_fw_version(struct smu_context *smu);
-
int smu_v15_0_set_driver_table_location(struct smu_context *smu);
int smu_v15_0_set_tool_table_location(struct smu_context *smu);
@@ -199,7 +184,7 @@ int smu_v15_0_set_power_source(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_15_0_dpm_table *single_dpm_table);
+ struct smu_dpm_table *single_dpm_table);
int smu_v15_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 74c818e3fbd0..54d3dba7d354 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -65,6 +65,8 @@
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x17
+
static const struct smu_feature_bits arcturus_dpm_features = {
.bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1905,7 +1907,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* pptable related */
.setup_pptable = arcturus_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -1958,5 +1960,6 @@ void arcturus_set_ppt_funcs(struct smu_context *smu)
smu->table_map = arcturus_table_map;
smu->pwr_src_map = arcturus_pwr_src_map;
smu->workload_map = arcturus_workload_map;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
smu_v11_0_init_msg_ctl(smu, arcturus_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 4e70308a455e..e6e009df9840 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -582,7 +582,7 @@ cyan_skillfish_get_enabled_mask(struct smu_context *smu,
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.init_smc_tables = cyan_skillfish_init_smc_tables,
@@ -605,5 +605,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = MP1_DRIVER_IF_VERSION;
smu_v11_0_init_msg_ctl(smu, cyan_skillfish_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 163e09ca0730..cd0457e13f54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -73,6 +73,10 @@ static const struct smu_feature_bits navi10_dpm_features = {
#define SMU_11_0_GFX_BUSY_THRESHOLD 15
+#define SMU11_DRIVER_IF_VERSION_NV10 0x37
+#define SMU11_DRIVER_IF_VERSION_NV12 0x38
+#define SMU11_DRIVER_IF_VERSION_NV14 0x38
+
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -3308,7 +3312,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.setup_pptable = navi10_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -3361,11 +3365,26 @@ static const struct pptable_funcs navi10_ppt_funcs = {
void navi10_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &navi10_ppt_funcs;
smu->clock_map = navi10_clk_map;
smu->feature_map = navi10_feature_mask_map;
smu->table_map = navi10_table_map;
smu->pwr_src_map = navi10_pwr_src_map;
smu->workload_map = navi10_workload_map;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ break;
+ case IP_VERSION(11, 0, 9):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
+ case IP_VERSION(11, 0, 5):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ break;
+ }
+
smu_v11_0_init_msg_ctl(smu, navi10_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index cf030af18aad..f799e489b481 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3119,7 +3119,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.setup_pptable = sienna_cichlid_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -3176,13 +3176,36 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode2_reset = sienna_cichlid_mode2_reset,
};
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
+#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
+
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &sienna_cichlid_ppt_funcs;
smu->clock_map = sienna_cichlid_clk_map;
smu->feature_map = sienna_cichlid_feature_mask_map;
smu->table_map = sienna_cichlid_table_map;
smu->pwr_src_map = sienna_cichlid_pwr_src_map;
smu->workload_map = sienna_cichlid_workload_map;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 7):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
+ break;
+ case IP_VERSION(11, 0, 11):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
+ break;
+ case IP_VERSION(11, 0, 12):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
+ break;
+ case IP_VERSION(11, 0, 13):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
+ break;
+ }
+
smu_v11_0_init_msg_ctl(smu, sienna_cichlid_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 7ca8fdd23206..d68ceee16d8f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -192,81 +192,6 @@ int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v11_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(11, 0, 0):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
- break;
- case IP_VERSION(11, 0, 9):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
- break;
- case IP_VERSION(11, 0, 5):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
- break;
- case IP_VERSION(11, 0, 7):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
- break;
- case IP_VERSION(11, 0, 11):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
- break;
- case IP_VERSION(11, 5, 0):
- case IP_VERSION(11, 5, 2):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
- break;
- case IP_VERSION(11, 0, 12):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
- break;
- case IP_VERSION(11, 0, 13):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
- break;
- case IP_VERSION(11, 0, 8):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
- break;
- case IP_VERSION(11, 0, 2):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
- break;
- default:
- dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
- amdgpu_ip_version(adev, MP1_HWIP, 0));
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
- break;
- }
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5eabaf55dfc5..d269b505aefb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -2511,7 +2511,7 @@ static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entr
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = vangogh_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -2561,5 +2561,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->table_map = vangogh_table_map;
smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION;
smu_v11_0_init_msg_ctl(smu, vangogh_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 186020ed6708..75335da224c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1457,7 +1457,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.get_power_profile_mode = renoir_get_power_profile_mode,
.read_sensor = renoir_read_sensor,
.check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index ac5e44dff6c9..f09da4d14510 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -70,42 +70,6 @@ int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v12_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!smu->is_apu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 2b4faab37693..dc056f1e4b64 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1988,7 +1988,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
/* pptable related */
.setup_pptable = aldebaran_setup_pptable,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 447a9c26bb77..be9a7a32de99 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -258,48 +258,6 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v13_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- adev->pm.fw_version = smu_version;
-
- /* only for dGPU w/ SMU13*/
- if (adev->pm.fw)
- dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
- if_version != smu->smc_driver_if_version) {
- dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_info(adev->dev, "SMU driver if version not matched\n");
- }
-
- return ret;
-}
-
static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
@@ -2508,4 +2466,6 @@ void smu_v13_0_reset_custom_level(struct smu_context *smu)
pstate_table->uclk_pstate.custom.max = 0;
pstate_table->gfxclk_pstate.custom.min = 0;
pstate_table->gfxclk_pstate.custom.max = 0;
+ pstate_table->fclk_pstate.custom.min = 0;
+ pstate_table->fclk_pstate.custom.max = 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 554f616328c3..0a7f5fa3c1d3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -773,13 +773,13 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxclkFrequencyPreDs;
break;
case METRICS_AVERAGE_FCLK:
- if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_0_BUSY_THRESHOLD)
*value = metrics->AverageFclkFrequencyPostDs;
else
*value = metrics->AverageFclkFrequencyPreDs;
break;
case METRICS_AVERAGE_UCLK:
- if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_0_BUSY_THRESHOLD)
*value = metrics->AverageMemclkFrequencyPostDs;
else
*value = metrics->AverageMemclkFrequencyPreDs;
@@ -800,7 +800,7 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxActivity;
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = metrics->AverageUclkActivity;
+ *value = smu_safe_u16_nn(metrics->AverageUclkActivity);
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = max(metrics->Vcn0ActivityPercentage,
@@ -2085,7 +2085,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
- gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_umc_activity = smu_safe_u16_nn(metrics->AverageUclkActivity);
gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
@@ -2102,7 +2102,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
else
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
- if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_0_BUSY_THRESHOLD)
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
else
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
@@ -3164,7 +3164,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
.setup_pptable = smu_v13_0_0_setup_pptable,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.system_features_control = smu_v13_0_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 32d5e2170d80..fe929bd89058 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -49,6 +49,13 @@
#undef pr_info
#undef pr_debug
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0x3) == 0x3)
+
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+ for ((stack_idx) = 0; (umc_mask); \
+	     (umc_mask) >>= 2, (stack_idx)++)
+
#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
[smu_feature] = { 1, (smu_13_0_12_feature) }
@@ -262,8 +269,9 @@ static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
int ret;
if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
- max_width = (uint8_t)static_metrics->MaxXgmiWidth;
- max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ max_width = (uint8_t)SMUQ10_ROUND(static_metrics->MaxXgmiWidth);
+ max_speed =
+ (uint16_t)SMUQ10_ROUND(static_metrics->MaxXgmiBitrate);
ret = 0;
} else {
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
@@ -471,9 +479,14 @@ static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
}
amdgpu_hdp_invalidate(smu->adev, NULL);
+
+ ret = smu_cmn_vram_cpy(smu, sys_table->cache.buffer,
+ table->cpu_addr,
+ smu_v13_0_12_get_system_metrics_size());
+ if (ret)
+ return ret;
+
smu_table_cache_update_time(sys_table, jiffies);
- memcpy(sys_table->cache.buffer, table->cpu_addr,
- smu_v13_0_12_get_system_metrics_size());
return 0;
}
@@ -834,7 +847,7 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
+ int ret = 0, xcc_id, inst, i, j, idx;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
@@ -849,6 +862,31 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
gpu_metrics->temperature_vrsoc =
SMUQ10_ROUND(metrics->MaxVrTemperature);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM))) {
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+ if (unlikely(hweight64(mask) / 2 > SMU_13_0_6_MAX_HBM_STACKS)) {
+				dev_warn(adev->dev, "Invalid umc mask 0x%llx\n", mask);
+ } else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ metrics->HbmTemperature[stack_idx];
+ }
+ }
+ }
+ idx = 0;
+ for_each_inst(i, adev->aid_mask) {
+ gpu_metrics->temperature_aid[idx] = metrics->AidTemperature[i];
+ idx++;
+ }
+ }
+
gpu_metrics->average_gfx_activity =
SMUQ10_ROUND(metrics->SocketGfxBusy);
gpu_metrics->average_umc_activity =
@@ -964,6 +1002,9 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
[i] = SMUQ10_ROUND(
metrics->GfxclkBelowHostLimitTotalAcc[inst]);
}
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM)))
+ gpu_metrics->temperature_xcd[i] = metrics->XcdTemperature[inst];
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 5b1a038d6a19..ba91bf590eed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -1098,7 +1098,7 @@ static int smu_v13_0_4_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.check_fw_status = smu_v13_0_check_fw_status,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = smu_v13_0_4_init_smc_tables,
.fini_smc_tables = smu_v13_0_4_fini_smc_tables,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index d534723fef91..27372a60e83d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -1102,7 +1102,7 @@ static int smu_v13_0_5_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
static const struct pptable_funcs smu_v13_0_5_ppt_funcs = {
.check_fw_status = smu_v13_0_check_fw_status,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = smu_v13_0_5_init_smc_tables,
.fini_smc_tables = smu_v13_0_5_fini_smc_tables,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 870bcc86fd79..cd0a23f432ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -373,6 +373,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
} else {
smu_v13_0_12_tables_fini(smu);
}
+
+ if (fw_ver >= 0x04561000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_AID_XCD_HBM));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -458,6 +461,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 4 && fw_ver >= 0x04557100) ||
(pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -478,7 +482,7 @@ static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
{
int r;
- r = smu_v13_0_check_fw_version(smu);
+ r = smu_cmn_check_fw_version(smu);
/* Initialize caps flags once fw version is fetched */
if (!r)
smu_v13_0_x_init_caps(smu);
@@ -774,7 +778,10 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
}
amdgpu_hdp_invalidate(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+ ret = smu_cmn_vram_cpy(smu, smu_table->metrics_table,
+ table->cpu_addr, table_size);
+ if (ret)
+ return ret;
smu_table->metrics_time = jiffies;
}
@@ -853,9 +860,9 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
}
amdgpu_hdp_invalidate(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
- return 0;
+ return smu_cmn_vram_cpy(smu, smu_table->metrics_table,
+ table->cpu_addr, table_size);
}
static void smu_v13_0_6_update_caps(struct smu_context *smu)
@@ -1196,6 +1203,7 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
@@ -1213,6 +1221,12 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
+ pstate_table->fclk_pstate.curr.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.curr.max = SMU_DPM_TABLE_MAX(fclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
+
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
@@ -1398,7 +1412,15 @@ static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
pstate_table->uclk_pstate.curr.min,
pstate_table->uclk_pstate.curr.max);
break;
+ case SMU_OD_FCLK:
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT))
+ return -EOPNOTSUPP;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_FCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->fclk_pstate.curr.min,
+ pstate_table->fclk_pstate.curr.max);
+ break;
case SMU_SCLK:
case SMU_GFXCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
@@ -2040,7 +2062,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
int ret = 0;
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
- clk_type != SMU_UCLK)
+ clk_type != SMU_UCLK && clk_type != SMU_FCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -2081,6 +2103,15 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
pstate_table->uclk_pstate.curr.max = max;
}
+ if (clk_type == SMU_FCLK) {
+ if (max == pstate_table->fclk_pstate.curr.max)
+ return 0;
+
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, 0, max, false);
+ if (!ret)
+ pstate_table->fclk_pstate.curr.max = max;
+ }
+
return ret;
}
@@ -2123,6 +2154,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
uint32_t min_clk;
uint32_t max_clk;
@@ -2203,6 +2235,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
pstate_table->uclk_pstate.custom.max = input[1];
}
break;
+ case PP_OD_EDIT_FCLK_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu,
+ SMU_FEATURE_DPM_FCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "FCLK limits setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min FCLK level is not supported\n");
+ return -EOPNOTSUPP;
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum FCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->fclk_pstate.custom.max =
+ pstate_table->fclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->fclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
@@ -2232,6 +2298,17 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
}
+
+ if (SMU_DPM_TABLE_MAX(fclk_table) !=
+ pstate_table->fclk_pstate.curr.max) {
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.fclk_table);
+ ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
+ SMU_FCLK, min_clk,
+ max_clk, false);
+ if (ret)
+ return ret;
+ }
smu_v13_0_reset_custom_level(smu);
}
break;
@@ -2258,6 +2335,16 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
+ if (pstate_table->fclk_pstate.custom.max) {
+ min_clk = pstate_table->fclk_pstate.curr.min;
+ max_clk = pstate_table->fclk_pstate.custom.max;
+ ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
+ SMU_FCLK, min_clk,
+ max_clk, false);
+ if (ret)
+ return ret;
+ }
+
if (!pstate_table->uclk_pstate.custom.max)
return 0;
@@ -2320,13 +2407,15 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
- memcpy(table->cpu_addr, table_data, table_size);
+ ret = smu_cmn_vram_cpy(smu, table->cpu_addr, table_data, table_size);
+ if (ret)
+ return ret;
+
/* Flush hdp cache */
amdgpu_hdp_flush(adev, NULL);
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
- NULL);
- return ret;
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
+ NULL);
}
static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -3163,14 +3252,25 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg)
{
+ struct amdgpu_device *adev = smu->adev;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
switch (msg) {
case SMU_MSG_QueryValidMcaCount:
case SMU_MSG_QueryValidMcaCeCount:
case SMU_MSG_McaBankDumpDW:
case SMU_MSG_McaBankCeDumpDW:
case SMU_MSG_ClearMcaOnRead:
+ case SMU_MSG_GetRASTableVersion:
+ case SMU_MSG_GetBadPageCount:
+ case SMU_MSG_GetBadPageMcaAddr:
+ case SMU_MSG_SetTimestamp:
+ case SMU_MSG_GetTimestamp:
+ case SMU_MSG_GetBadPageIpid:
+ case SMU_MSG_EraseRasTable:
ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index ffb06564f830..a150fc88902c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -78,6 +78,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(RAS_EEPROM),
SMU_CAP(FAST_PPT),
SMU_CAP(SYSTEM_POWER_METRICS),
+ SMU_CAP(TEMP_AID_XCD_HBM),
SMU_CAP(ALL),
};
@@ -87,6 +88,8 @@ enum smu_v13_0_6_caps {
#define SMU_13_0_6_MAX_XCC 8
#define SMU_13_0_6_MAX_VCN 4
#define SMU_13_0_6_MAX_JPEG 40
+#define SMU_13_0_6_MAX_AID 4
+#define SMU_13_0_6_MAX_HBM_STACKS 8
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
@@ -222,7 +225,15 @@ extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
SMU_13_0_6_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
- SMU_13_0_6_MAX_XCC);
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_13_0_6_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_13_0_6_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_xcd, SMU_13_0_6_MAX_XCC); \
+
DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f331e87858c9..5abf2b0703c6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -783,13 +783,13 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxclkFrequencyPreDs;
break;
case METRICS_AVERAGE_FCLK:
- if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_7_BUSY_THRESHOLD)
*value = metrics->AverageFclkFrequencyPostDs;
else
*value = metrics->AverageFclkFrequencyPreDs;
break;
case METRICS_AVERAGE_UCLK:
- if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_7_BUSY_THRESHOLD)
*value = metrics->AverageMemclkFrequencyPostDs;
else
*value = metrics->AverageMemclkFrequencyPreDs;
@@ -814,7 +814,7 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxActivity;
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = metrics->AverageUclkActivity;
+ *value = smu_safe_u16_nn(metrics->AverageUclkActivity);
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->AverageSocketPower << 8;
@@ -2091,7 +2091,7 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
- gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_umc_activity = smu_safe_u16_nn(metrics->AverageUclkActivity);
gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
@@ -2104,7 +2104,7 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
else
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
- if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_13_0_7_BUSY_THRESHOLD)
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
else
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
@@ -2819,7 +2819,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
.setup_pptable = smu_v13_0_7_setup_pptable,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.system_features_control = smu_v13_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index f43a91ac6970..7bf88ffd311b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1331,7 +1331,7 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
static const struct pptable_funcs yellow_carp_ppt_funcs = {
.check_fw_status = smu_v13_0_check_fw_status,
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = yellow_carp_init_smc_tables,
.fini_smc_tables = yellow_carp_fini_smc_tables,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index e38354c694c9..d0a8df1aa6b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -229,66 +229,6 @@ int smu_v14_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v14_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 0):
- case IP_VERSION(14, 0, 4):
- case IP_VERSION(14, 0, 5):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
- break;
- case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
- break;
- case IP_VERSION(14, 0, 2):
- case IP_VERSION(14, 0, 3):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
- break;
- default:
- dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- amdgpu_ip_version(adev, MP1_HWIP, 0));
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
- break;
- }
-
- if (adev->pm.fw)
- dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 2353524b8821..a28624d4847a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -65,6 +65,9 @@
#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+
#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700
#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678
#define SMU_14_0_0_UMD_PSTATE_FCLK 1800
@@ -1699,7 +1702,7 @@ static int smu_v14_0_0_restore_user_od_settings(struct smu_context *smu)
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
- .check_fw_version = smu_v14_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = smu_v14_0_0_init_smc_tables,
.fini_smc_tables = smu_v14_0_0_fini_smc_tables,
.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
@@ -1750,10 +1753,23 @@ static void smu_v14_0_0_init_msg_ctl(struct smu_context *smu)
void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v14_0_0_ppt_funcs;
smu->feature_map = smu_v14_0_0_feature_mask_map;
smu->table_map = smu_v14_0_0_table_map;
smu->is_apu = true;
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ }
+
smu_v14_0_0_init_msg_ctl(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index c3ebfac062a7..62514e3ac600 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -68,6 +68,8 @@ static const struct smu_feature_bits smu_v14_0_2_dpm_features = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT) }
};
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
+
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
@@ -659,13 +661,13 @@ static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxclkFrequencyPreDs;
break;
case METRICS_AVERAGE_FCLK:
- if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_14_0_2_BUSY_THRESHOLD)
*value = metrics->AverageFclkFrequencyPostDs;
else
*value = metrics->AverageFclkFrequencyPreDs;
break;
case METRICS_AVERAGE_UCLK:
- if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_14_0_2_BUSY_THRESHOLD)
*value = metrics->AverageMemclkFrequencyPostDs;
else
*value = metrics->AverageMemclkFrequencyPreDs;
@@ -686,7 +688,7 @@ static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageGfxActivity;
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = metrics->AverageUclkActivity;
+ *value = smu_safe_u16_nn(metrics->AverageUclkActivity);
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = max(metrics->AverageVcn0ActivityPercentage,
@@ -2145,7 +2147,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
- gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_umc_activity = smu_safe_u16_nn(metrics->AverageUclkActivity);
gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
@@ -2157,7 +2159,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
else
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
- if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ if (smu_safe_u16_nn(metrics->AverageUclkActivity) <= SMU_14_0_2_BUSY_THRESHOLD)
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
else
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
@@ -2798,7 +2800,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.fini_power = smu_v14_0_fini_power,
.check_fw_status = smu_v14_0_check_fw_status,
.setup_pptable = smu_v14_0_2_setup_pptable,
- .check_fw_version = smu_v14_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.system_features_control = smu_v14_0_system_features_control,
.set_allowed_mask = smu_v14_0_set_allowed_mask,
@@ -2863,5 +2865,6 @@ void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v14_0_2_table_map;
smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
smu->workload_map = smu_v14_0_2_workload_map;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
smu_v14_0_2_init_msg_ctl(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
index 7f59a0aabdeb..fa083ad46c0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o
+SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o smu_v15_0_8_ppt.o
AMD_SWSMU_SMU15MGR = $(addprefix $(AMD_SWSMU_PATH)/smu15/,$(SMU15_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
index 3fd84dd85e9b..c3cb36813806 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
@@ -207,58 +207,6 @@ int smu_v15_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v15_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(15, 0, 0):
- smu->smc_driver_if_version = SMU15_DRIVER_IF_VERSION_SMU_V15_0;
- break;
- default:
- dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- amdgpu_ip_version(adev, MP1_HWIP, 0));
- smu->smc_driver_if_version = SMU15_DRIVER_IF_VERSION_INV;
- break;
- }
-
- if (adev->pm.fw)
- dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_info(adev->dev, "SMU driver if version not matched\n");
- }
-
- return ret;
-}
-
static int smu_v15_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
@@ -641,71 +589,52 @@ int smu_v15_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
- int ret = 0;
- uint64_t address;
- uint32_t address_low, address_high;
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_DramLogSetDramAddr,
+ .num_args = 3,
+ .num_out_args = 0,
+ };
if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
- return ret;
-
- address = memory_pool->mc_address;
- address_high = (uint32_t)upper_32_bits(address);
- address_low = (uint32_t)lower_32_bits(address);
+ return 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high, NULL);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low, NULL);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size, NULL);
- if (ret)
- return ret;
+ /* SMU_MSG_DramLogSetDramAddr: ARG0=low, ARG1=high, ARG2=size */
+ args.args[0] = lower_32_bits(memory_pool->mc_address);
+ args.args[1] = upper_32_bits(memory_pool->mc_address);
+ args.args[2] = (u32)memory_pool->size;
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
- int ret = 0;
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_SetDriverDramAddr,
+ .num_args = 2,
+ .num_out_args = 0,
+ };
- if (driver_table->mc_address) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address),
- NULL);
- if (!ret)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address),
- NULL);
- }
+ args.args[0] = lower_32_bits(driver_table->mc_address);
+ args.args[1] = upper_32_bits(driver_table->mc_address);
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_tool_table_location(struct smu_context *smu)
{
- int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_SetToolsDramAddr,
+ .num_args = 2,
+ .num_out_args = 0,
+ };
- if (tool_table->mc_address) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address),
- NULL);
- if (!ret)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address),
- NULL);
- }
+ /* SMU_MSG_SetToolsDramAddr: ARG0=low, ARG1=high */
+ args.args[0] = lower_32_bits(tool_table->mc_address);
+ args.args[1] = upper_32_bits(tool_table->mc_address);
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_allowed_mask(struct smu_context *smu)
@@ -752,8 +681,7 @@ int smu_v15_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-int smu_v15_0_system_features_control(struct smu_context *smu,
- bool en)
+int smu_v15_0_system_features_control(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
@@ -957,7 +885,8 @@ static int smu_v15_0_wait_for_reset_complete(struct smu_context *smu,
return ret;
}
-int smu_v15_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+int smu_v15_0_wait_for_event(struct smu_context *smu,
+ enum smu_event_type event,
uint64_t event_arg)
{
int ret = -EINVAL;
@@ -1129,18 +1058,12 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
{
struct smu_15_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_15_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_15_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_15_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_15_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_15_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_15_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
@@ -1155,34 +1078,34 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- sclk_min = sclk_max = gfx_table->max;
- mclk_min = mclk_max = mem_table->max;
- socclk_min = socclk_max = soc_table->max;
- vclk_min = vclk_max = vclk_table->max;
- dclk_min = dclk_max = dclk_table->max;
- fclk_min = fclk_max = fclk_table->max;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- sclk_min = sclk_max = gfx_table->min;
- mclk_min = mclk_max = mem_table->min;
- socclk_min = socclk_max = soc_table->min;
- vclk_min = vclk_max = vclk_table->min;
- dclk_min = dclk_max = dclk_table->min;
- fclk_min = fclk_max = fclk_table->min;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MIN(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MIN(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MIN(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MIN(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MIN(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MIN(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- sclk_min = gfx_table->min;
- sclk_max = gfx_table->max;
- mclk_min = mem_table->min;
- mclk_max = mem_table->max;
- socclk_min = soc_table->min;
- socclk_max = soc_table->max;
- vclk_min = vclk_table->min;
- vclk_max = vclk_table->max;
- dclk_min = dclk_table->min;
- dclk_max = dclk_table->max;
- fclk_min = fclk_table->min;
- fclk_max = fclk_table->max;
+ sclk_min = SMU_DPM_TABLE_MIN(gfx_table);
+ sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = SMU_DPM_TABLE_MIN(mem_table);
+ mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = SMU_DPM_TABLE_MIN(soc_table);
+ socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = SMU_DPM_TABLE_MIN(vclk_table);
+ vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = SMU_DPM_TABLE_MIN(dclk_table);
+ dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = SMU_DPM_TABLE_MIN(fclk_table);
+ fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1404,10 +1327,11 @@ static int smu_v15_0_get_fine_grained_status(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_15_0_dpm_table *single_dpm_table)
+ struct smu_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
+ bool is_fine_grained;
int i;
ret = smu_v15_0_get_dpm_level_count(smu,
@@ -1420,12 +1344,15 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
ret = smu_v15_0_get_fine_grained_status(smu,
clk_type,
- &single_dpm_table->is_fine_grained);
+ &is_fine_grained);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
return ret;
}
+ if (is_fine_grained)
+ single_dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED;
+
for (i = 0; i < single_dpm_table->count; i++) {
ret = smu_v15_0_get_dpm_freq_by_index(smu,
clk_type,
@@ -1438,11 +1365,6 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
-
- if (i == 0)
- single_dpm_table->min = clk;
- else if (i == single_dpm_table->count - 1)
- single_dpm_table->max = clk;
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
index 49cf2b9d931e..8d092c347076 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
@@ -1416,7 +1416,7 @@ static int smu_v15_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
static const struct pptable_funcs smu_v15_0_0_ppt_funcs = {
.check_fw_status = smu_v15_0_check_fw_status,
- .check_fw_version = smu_v15_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = smu_v15_0_0_init_smc_tables,
.fini_smc_tables = smu_v15_0_0_fini_smc_tables,
.get_vbios_bootup_values = smu_v15_0_get_vbios_bootup_values,
@@ -1468,6 +1468,7 @@ void smu_v15_0_0_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = smu_v15_0_0_feature_mask_map;
smu->table_map = smu_v15_0_0_table_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = SMU15_DRIVER_IF_VERSION_SMU_V15_0;
smu_v15_0_0_init_msg_ctl(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
new file mode 100644
index 000000000000..db85186f2d66
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
@@ -0,0 +1,2272 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v15_0_8_pmfw.h"
+#include "smu15_driver_if_v15_0_8.h"
+#include "smu_v15_0_8_ppsmc.h"
+#include "smu_v15_0_8_ppt.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+#include "mp/mp_15_0_8_offset.h"
+#include "mp/mp_15_0_8_sh_mask.h"
+#include "smu_v15_0.h"
+#include "amdgpu_fru_eeprom.h"
+
+#undef MP1_Public
+
+/* address block */
+#define MP1_Public 0x03b00000
+#define smnMP1_FIRMWARE_FLAGS_15_0_8 0x3010024
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0xF) == 0xF)
+
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+ for ((stack_idx) = 0; (umc_mask); \
+ (umc_mask) >>= 4, (stack_idx)++) \
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_15_0_8_FEA_MAP(smu_feature, smu_15_0_8_feature) \
+ [smu_feature] = { 1, (smu_15_0_8_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+/* DPM-related PMFW feature bits the driver checks/controls on SMU v15.0.8. */
+static const struct smu_feature_bits smu_v15_0_8_dpm_features = {
+	.bits = { SMU_FEATURE_BIT_INIT(FEATURE_ID_DATA_CALCULATION),
+		  SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_GFXCLK),
+		  SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_UCLK),
+		  SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_FCLK),
+		  SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_GL2CLK) }
+};
+
+/*
+ * Driver message -> PPSMC opcode map for SMU v15.0.8. The third MSG_MAP
+ * argument is a per-message flags field (e.g. SMU_MSG_RAS_PRI,
+ * SMU_MSG_NO_PRECHECK). NOTE(review): the bare 0/1 values follow the
+ * convention of the other swsmu generations -- confirm their meaning
+ * against the MSG_MAP macro definition in smu_cmn.h.
+ */
+static const struct cmn2asic_msg_mapping smu_v15_0_8_message_map[SMU_MSG_MAX_COUNT] = {
+	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
+	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+	MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
+	MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
+	MSG_MAP(SetDriverDramAddr, PPSMC_MSG_SetDriverDramAddr, 1),
+	MSG_MAP(SetToolsDramAddr, PPSMC_MSG_SetToolsDramAddr, 0),
+	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+	MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0),
+	MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
+	MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+	MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+	MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+	MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+	MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+	MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+	MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
+	MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+	MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
+	MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+	MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0),
+	MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0),
+	MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0),
+	MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0),
+	MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0),
+	MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+	MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
+	MSG_MAP(GetSystemMetricsVersion, PPSMC_MSG_GetSystemMetricsVersion, 0),
+	MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+	MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 0),
+	MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 0),
+	MSG_MAP(SetSoftMinGl2clk, PPSMC_MSG_SetSoftMinGl2clk, 0),
+	MSG_MAP(SetSoftMaxGl2clk, PPSMC_MSG_SetSoftMaxGl2clk, 0),
+	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
+	MSG_MAP(SetSoftMaxFclk, PPSMC_MSG_SetSoftMaxFclk, 0),
+};
+
+/* TODO: Update the clk map once enum PPCLK is updated in smu15_driver_if_v15_0_8.h */
+/* SMU clock-type -> PMFW PPCLK index; only UCLK is exposed so far. */
+static struct cmn2asic_mapping smu_v15_0_8_clk_map[SMU_CLK_COUNT] = {
+	CLK_MAP(UCLK, PPCLK_UCLK),
+};
+
+/* Generic SMU feature bit -> SMU v15.0.8 firmware feature ID. */
+static const struct cmn2asic_mapping smu_v15_0_8_feature_mask_map[SMU_FEATURE_COUNT] = {
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_ID_DATA_CALCULATION),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_ID_DPM_GFXCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_ID_DPM_UCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_ID_DPM_FCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_GL2CLK_BIT, FEATURE_ID_DPM_GL2CLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_ID_DS_GFXCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_ID_DS_SOCCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_ID_DS_LCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_ID_DS_FCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_DMABECLK_BIT, FEATURE_ID_DS_DMABECLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPIFOECLK_BIT, FEATURE_ID_DS_MPIFOECLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPRASCLK_BIT, FEATURE_ID_DS_MPRASCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPNHTCLK_BIT, FEATURE_ID_DS_MPNHTCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_FIOCLK_BIT, FEATURE_ID_DS_FIOCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_DXIOCLK_BIT, FEATURE_ID_DS_DXIOCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_GL2CLK_BIT, FEATURE_ID_DS_GL2CLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_ID_PPT),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_ID_TDC),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_ID_SMU_CG),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_ID_FW_CTF),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_ID_THERMAL),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_ID_SOC_PCC),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_ID_XGMI_PER_LINK_PWR_DOWN),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_ID_DS_VCN),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_ID_DS_MP1CLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_ID_DS_MPIOCLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_ID_DS_MP0CLK),
+	SMU_15_0_8_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_ID_PIT),
+};
+
+#define TABLE_PMSTATUSLOG 0
+#define TABLE_SMU_METRICS 1
+#define TABLE_I2C_COMMANDS 2
+#define TABLE_COUNT 3
+
+/* Generic SMU table ID -> local TABLE_* index (see defines above). */
+static const struct cmn2asic_mapping smu_v15_0_8_table_map[SMU_TABLE_COUNT] = {
+	TAB_MAP(PMSTATUSLOG),
+	TAB_MAP(SMU_METRICS),
+	TAB_MAP(I2C_COMMANDS),
+};
+
+/* Size in bytes of the PMFW system metrics table for this generation. */
+static size_t smu_v15_0_8_get_system_metrics_size(void)
+{
+	return sizeof(SystemMetricsTable_t);
+}
+
+/*
+ * Register SMU tables and allocate the driver-side metrics/pptable
+ * buffers and caches.
+ *
+ * The kzalloc'd buffers use __free(kfree) scope cleanup, so every early
+ * return frees them automatically; ownership is transferred to smu_table
+ * (via no_free_ptr()) only once all fallible steps have succeeded.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int smu_v15_0_8_tables_init(struct smu_context *smu)
+{
+	struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics;
+	struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	int ret, gpu_metrics_size = sizeof(MetricsTable_t);
+	struct smu_table *tables = smu_table->tables;
+	struct smu_v15_0_8_gpu_metrics *gpu_metrics;
+	void *driver_pptable __free(kfree) = NULL;
+	void *metrics_table __free(kfree) = NULL;
+
+	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU15_TOOL_SIZE,
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+		       gpu_metrics_size,
+		       PAGE_SIZE,
+		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+	SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+		       smu_v15_0_8_get_system_metrics_size(), PAGE_SIZE,
+		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+	metrics_table = kzalloc(gpu_metrics_size, GFP_KERNEL);
+	if (!metrics_table)
+		return -ENOMEM;
+
+	smu_table->metrics_time = 0;
+
+	driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
+	if (!driver_pptable)
+		return -ENOMEM;
+
+	ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPU_METRICS,
+				    sizeof(struct smu_v15_0_8_gpu_metrics),
+				    SMU_GPU_METRICS_CACHE_INTERVAL);
+	if (ret)
+		return ret;
+
+	gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+				SMU_DRIVER_TABLE_GPU_METRICS);
+	/* gpu_metrics format v1.9 */
+	smu_v15_0_8_gpu_metrics_init(gpu_metrics, 1, 9);
+
+	/* 5 ms validity window for the PMFW system metrics snapshot */
+	ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+				   smu_v15_0_8_get_system_metrics_size(), 5);
+	if (ret)
+		return ret;
+
+	/* Initialize base board temperature metrics (50 ms cache) */
+	ret = smu_driver_table_init(smu,
+				    SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS,
+				    sizeof(*baseboard_temp_metrics), 50);
+	if (ret) {
+		/* unwind the system-metrics cache set up just above */
+		smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+		return ret;
+	}
+	baseboard_temp_metrics = (struct smu_v15_0_8_baseboard_temp_metrics *)
+		smu_driver_table_ptr(smu,
+				     SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+	smu_v15_0_8_baseboard_temp_metrics_init(baseboard_temp_metrics, 1, 1);
+	/* Initialize GPU board temperature metrics (50 ms cache) */
+	ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS,
+				    sizeof(*gpuboard_temp_metrics), 50);
+	if (ret) {
+		smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+		smu_driver_table_fini(smu,
+				      SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+		return ret;
+	}
+	gpuboard_temp_metrics = (struct smu_v15_0_8_gpuboard_temp_metrics *)
+		smu_driver_table_ptr(smu,
+				     SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+	smu_v15_0_8_gpuboard_temp_metrics_init(gpuboard_temp_metrics, 1, 1);
+
+	/* Success: hand buffer ownership over to smu_table. */
+	smu_table->metrics_table = no_free_ptr(metrics_table);
+	smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
+	mutex_init(&smu_table->metrics_lock);
+
+	return 0;
+}
+
+/*
+ * Allocate the per-device DPM context and policy container.
+ * Frees the context again if the policy allocation fails.
+ */
+static int smu_v15_0_8_allocate_dpm_context(struct smu_context *smu)
+{
+	struct smu_dpm_context *dpm = &smu->smu_dpm;
+
+	dpm->dpm_context = kzalloc(sizeof(struct smu_15_0_dpm_context),
+				   GFP_KERNEL);
+	if (!dpm->dpm_context)
+		return -ENOMEM;
+	dpm->dpm_context_size = sizeof(struct smu_15_0_dpm_context);
+
+	dpm->dpm_policies = kzalloc(sizeof(struct smu_dpm_policy_ctxt),
+				    GFP_KERNEL);
+	if (!dpm->dpm_policies) {
+		kfree(dpm->dpm_context);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Set up SMC tables, then the DPM context (init_smc_tables callback). */
+static int smu_v15_0_8_init_smc_tables(struct smu_context *smu)
+{
+	int ret;
+
+	ret = smu_v15_0_8_tables_init(smu);
+	if (ret)
+		return ret;
+
+	return smu_v15_0_8_allocate_dpm_context(smu);
+}
+
+/*
+ * Tear down the driver-side metrics tables/caches created by
+ * smu_v15_0_8_tables_init() and destroy the metrics lock.
+ * Always returns 0.
+ */
+static int smu_v15_0_8_tables_fini(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+
+	smu_driver_table_fini(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+	smu_driver_table_fini(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+	smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+	mutex_destroy(&smu_table->metrics_lock);
+
+	return 0;
+}
+
+/*
+ * fini_smc_tables callback: release the v15.0.8-specific tables first,
+ * then fall through to the common smu_v15_0 teardown.
+ */
+static int smu_v15_0_8_fini_smc_tables(struct smu_context *smu)
+{
+	int ret;
+
+	ret = smu_v15_0_8_tables_fini(smu);
+	if (ret)
+		return ret;
+
+	return smu_v15_0_fini_smc_tables(smu);
+}
+
+/*
+ * Mark every feature as allowed; the PMFW pptable decides which ones
+ * actually get enabled.
+ */
+static int smu_v15_0_8_init_allowed_features(struct smu_context *smu)
+{
+	/* pptable will handle the features to enable */
+	smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
+
+	return 0;
+}
+
+/*
+ * Refresh and read the cached PMFW metrics table.
+ *
+ * The cached copy is re-fetched from the SMU when it is older than @tmo
+ * milliseconds, has never been fetched, or @tmo is 0 (forced refresh).
+ * If @data is non-NULL, the cached table is copied into it. All accesses
+ * are serialized by smu_table->metrics_lock.
+ *
+ * Returns 0 on success or a negative errno from the SMU message/copy.
+ */
+static int smu_v15_0_8_get_metrics_table_internal(struct smu_context *smu, uint32_t tmo, void *data)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+	struct smu_table *table = &smu_table->driver_table;
+	struct amdgpu_device *adev = smu->adev;
+
+	mutex_lock(&smu_table->metrics_lock);
+
+	if (!tmo || !smu_table->metrics_time ||
+	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(tmo))) {
+		int ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
+		if (ret) {
+			dev_info(adev->dev,
+				 "Failed to export SMU metrics table!\n");
+			mutex_unlock(&smu_table->metrics_lock);
+			return ret;
+		}
+
+		/* PMFW wrote the table via DMA; invalidate HDP before reading */
+		amdgpu_device_invalidate_hdp(smu->adev, NULL);
+		ret = smu_cmn_vram_cpy(smu, smu_table->metrics_table,
+				       table->cpu_addr, table_size);
+		if (ret) {
+			mutex_unlock(&smu_table->metrics_lock);
+			return ret;
+		}
+
+		smu_table->metrics_time = jiffies;
+	}
+
+	if (data)
+		memcpy(data, smu_table->metrics_table, table_size);
+	mutex_unlock(&smu_table->metrics_lock);
+	return 0;
+}
+
+/*
+ * Look up a single value from the (cached) PMFW metrics table.
+ *
+ * Refreshes the cache with a 10 ms staleness window before reading.
+ * Unknown members store UINT_MAX into @value and still return 0.
+ *
+ * Fix vs. original: the METRICS_TEMPERATURE_MEM case declared a local
+ * `adev` that shadowed the function-scope one (-Wshadow); the outer
+ * variable is reused instead.
+ */
+static int smu_v15_0_8_get_smu_metrics_data(struct smu_context *smu,
+					    MetricsMember_t member, uint32_t *value)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+	struct amdgpu_device *adev = smu->adev;
+	int ret, xcc_id;
+
+	/* refresh the cached metrics table if older than 10 ms */
+	ret = smu_v15_0_8_get_metrics_table_internal(smu, 10, NULL);
+	if (ret)
+		return ret;
+
+	switch (member) {
+	case METRICS_CURR_GFXCLK:
+	case METRICS_AVERAGE_GFXCLK:
+		xcc_id = GET_INST(GC, 0);
+		*value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+		break;
+	case METRICS_CURR_SOCCLK:
+	case METRICS_AVERAGE_SOCCLK:
+		*value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+		break;
+	case METRICS_CURR_UCLK:
+	case METRICS_AVERAGE_UCLK:
+		*value = SMUQ10_ROUND(metrics->UclkFrequency[0]);
+		break;
+	case METRICS_CURR_VCLK:
+		*value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+		break;
+	case METRICS_CURR_DCLK:
+		*value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+		break;
+	case METRICS_CURR_FCLK:
+		*value = SMUQ10_ROUND(metrics->FclkFrequency[0]);
+		break;
+	case METRICS_AVERAGE_GFXACTIVITY:
+		*value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+		break;
+	case METRICS_AVERAGE_MEMACTIVITY:
+		*value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+		break;
+	case METRICS_CURR_SOCKETPOWER:
+		/* reported in 1/256 W units, hence the << 8 */
+		*value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+		break;
+	case METRICS_TEMPERATURE_HOTSPOT:
+		*value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+		break;
+	case METRICS_TEMPERATURE_MEM:
+	{
+		u32 max_hbm_temp = 0;
+
+		/* Find max temperature across all HBM stacks */
+		if (adev->umc.active_mask) {
+			u64 mask = adev->umc.active_mask;
+			int stack_idx;
+
+			for_each_hbm_stack(stack_idx, mask) {
+				u32 temp;
+
+				/* skip stacks whose 4-bit UMC group isn't fully active */
+				if (!hbm_stack_mask_valid(mask))
+					continue;
+
+				temp = SMUQ10_ROUND(metrics->HbmTemperature[stack_idx]);
+				if (temp > max_hbm_temp)
+					max_hbm_temp = temp;
+			}
+		}
+		*value = max_hbm_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+		break;
+	}
+	/* This is the max of all VRs and not just SOC VR.
+	 */
+	case METRICS_TEMPERATURE_VRSOC:
+		*value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+		break;
+	default:
+		*value = UINT_MAX;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic clock type to its metrics member and read the
+ * current frequency from the metrics table into @value.
+ */
+static int smu_v15_0_8_get_current_clk_freq_by_table(struct smu_context *smu,
+						     enum smu_clk_type clk_type,
+						     uint32_t *value)
+{
+	MetricsMember_t member;
+
+	if (!value)
+		return -EINVAL;
+
+	if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+		member = METRICS_CURR_GFXCLK;
+	else if (clk_type == SMU_UCLK || clk_type == SMU_MCLK)
+		member = METRICS_CURR_UCLK;
+	else if (clk_type == SMU_SOCCLK)
+		member = METRICS_CURR_SOCCLK;
+	else if (clk_type == SMU_VCLK)
+		member = METRICS_CURR_VCLK;
+	else if (clk_type == SMU_DCLK)
+		member = METRICS_CURR_DCLK;
+	else if (clk_type == SMU_FCLK)
+		member = METRICS_CURR_FCLK;
+	else
+		return -EINVAL;
+
+	return smu_v15_0_8_get_smu_metrics_data(smu, member, value);
+}
+
+/* Read GFX or memory busy percentage for the given pp sensor. */
+static int smu_v15_0_8_get_current_activity_percent(struct smu_context *smu,
+						    enum amd_pp_sensors sensor,
+						    uint32_t *value)
+{
+	if (!value)
+		return -EINVAL;
+
+	if (sensor == AMDGPU_PP_SENSOR_GPU_LOAD)
+		return smu_v15_0_8_get_smu_metrics_data(smu,
+							METRICS_AVERAGE_GFXACTIVITY,
+							value);
+
+	if (sensor == AMDGPU_PP_SENSOR_MEM_LOAD)
+		return smu_v15_0_8_get_smu_metrics_data(smu,
+							METRICS_AVERAGE_MEMACTIVITY,
+							value);
+
+	dev_err(smu->adev->dev,
+		"Invalid sensor for retrieving clock activity\n");
+	return -EINVAL;
+}
+
+/* Read hotspot or memory temperature for the given pp sensor. */
+static int smu_v15_0_8_thermal_get_temperature(struct smu_context *smu,
+					       enum amd_pp_sensors sensor,
+					       uint32_t *value)
+{
+	if (!value)
+		return -EINVAL;
+
+	if (sensor == AMDGPU_PP_SENSOR_HOTSPOT_TEMP)
+		return smu_v15_0_8_get_smu_metrics_data(smu,
+							METRICS_TEMPERATURE_HOTSPOT,
+							value);
+
+	if (sensor == AMDGPU_PP_SENSOR_MEM_TEMP)
+		return smu_v15_0_8_get_smu_metrics_data(smu,
+							METRICS_TEMPERATURE_MEM,
+							value);
+
+	dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
+	return -EINVAL;
+}
+
+/*
+ * Fetch the PMFW system metrics table into the driver cache, unless the
+ * cached copy is still valid.
+ *
+ * NOTE(review): this uses amdgpu_hdp_invalidate() while the SMU metrics
+ * path uses amdgpu_device_invalidate_hdp() -- confirm both helpers are
+ * equivalent here.
+ */
+static int smu_v15_0_8_get_system_metrics_table(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *table = &smu_table->driver_table;
+	struct smu_table *tables = smu_table->tables;
+	struct smu_table *sys_table;
+	int ret;
+
+	sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+	if (smu_table_cache_is_valid(sys_table))
+		return 0;
+
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+	if (ret) {
+		dev_info(smu->adev->dev,
+			 "Failed to export system metrics table!\n");
+		return ret;
+	}
+
+	amdgpu_hdp_invalidate(smu->adev, NULL);
+
+	ret = smu_cmn_vram_cpy(smu, sys_table->cache.buffer,
+			       table->cpu_addr,
+			       sizeof(SystemMetricsTable_t));
+	if (ret)
+		return ret;
+
+	smu_table_cache_update_time(sys_table, jiffies);
+
+	return 0;
+}
+
+/*
+ * Read node-power related sensors from the cached system metrics table.
+ * MAXNODEPOWERLIMIT currently has no firmware source and reports 0 (TBD).
+ */
+static int smu_v15_0_8_get_npm_data(struct smu_context *smu,
+				    enum amd_pp_sensors sensor,
+				    uint32_t *value)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = smu_table->tables;
+	SystemMetricsTable_t *metrics;
+	struct smu_table *sys_table;
+	int ret;
+
+	if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+		/*TBD as of now put 0 */
+		*value = 0;
+		return 0;
+	}
+
+	/* make sure the cached system metrics snapshot is fresh */
+	ret = smu_v15_0_8_get_system_metrics_table(smu);
+	if (ret)
+		return ret;
+
+	sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+	metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+		*value = SMUQ10_ROUND(metrics->NodePowerLimit);
+		break;
+	case AMDGPU_PP_SENSOR_NODEPOWER:
+		*value = SMUQ10_ROUND(metrics->NodePower);
+		break;
+	case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+		*value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * pp read_sensor backend. Every supported sensor writes a 4-byte value
+ * into @data and sets *@size = 4; unsupported sensors return -EOPNOTSUPP.
+ * While a RAS interrupt is in flight, reads are skipped (returns 0 with
+ * @data untouched).
+ */
+static int smu_v15_0_8_read_sensor(struct smu_context *smu,
+				   enum amd_pp_sensors sensor, void *data,
+				   uint32_t *size)
+{
+	struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	int ret = 0;
+
+	if (amdgpu_ras_intr_triggered())
+		return 0;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_MEM_LOAD:
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		ret = smu_v15_0_8_get_current_activity_percent(smu, sensor,
+							       (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+		ret = smu_v15_0_8_get_smu_metrics_data(smu,
+						       METRICS_CURR_SOCKETPOWER,
+						       (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+	case AMDGPU_PP_SENSOR_MEM_TEMP:
+		ret = smu_v15_0_8_thermal_get_temperature(smu, sensor,
+							  (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GFX_MCLK:
+		ret = smu_v15_0_8_get_current_clk_freq_by_table(smu,
+								SMU_UCLK, (uint32_t *)data);
+		/* the output clock frequency in 10K unit */
+		*(uint32_t *)data *= 100;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		ret = smu_v15_0_8_get_current_clk_freq_by_table(smu,
+								SMU_GFXCLK, (uint32_t *)data);
+		/* MHz -> 10 kHz units, same convention as MCLK above */
+		*(uint32_t *)data *= 100;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_VDDBOARD:
+		/* cached at DPM-context setup time, no firmware round-trip */
+		*(uint32_t *)data = dpm_context->board_volt;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+	case AMDGPU_PP_SENSOR_NODEPOWER:
+	case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+	case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+		ret = smu_v15_0_8_get_npm_data(smu, sensor, (uint32_t *)data);
+		if (ret)
+			return ret;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+static int smu_v15_0_8_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf,
+ int *offset)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct smu_15_0_dpm_context *dpm_context;
+ struct smu_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ int ret, now, size = *offset;
+
+ if (amdgpu_ras_intr_triggered()) {
+ sysfs_emit_at(buf, size, "unavailable\n");
+ return -EBUSY;
+ }
+
+ dpm_context = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ break;
+ case SMU_OD_MCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ break;
+ case SMU_SCLK:
+ case SMU_GFXCLK:
+ single_dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ single_dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ single_dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ single_dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ break;
+ }
+
+ if (single_dpm_table) {
+ ret = smu_v15_0_8_get_current_clk_freq_by_table(smu, type, &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current clk Failed!");
+ return ret;
+ }
+ ret = smu_cmn_print_dpm_clk_levels(smu, single_dpm_table, now,
+ buf, offset);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ }
+
+ *offset = size;
+
+ return 0;
+}
+
+/*
+ * Report the hardware min/max frequency for @clk_type.
+ *
+ * Prefers the cached DPM tables (valid only once both endpoints are
+ * non-zero); falls back to the values captured in the driver pptable.
+ * Requires the pptable to have been populated (pptable->init).
+ * Either @min or @max may be NULL if the caller needs only one bound.
+ */
+static int smu_v15_0_8_get_dpm_ultimate_freq(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t *min, uint32_t *max)
+{
+	struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+	struct smu_dpm_table *dpm_table;
+	uint32_t min_clk = 0, max_clk = 0;
+
+	if (!pptable->init)
+		return -EINVAL;
+
+	/* Try cached DPM tables first */
+	if (dpm_context) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			dpm_table = &dpm_context->dpm_tables.uclk_table;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			dpm_table = &dpm_context->dpm_tables.gfx_table;
+			break;
+		case SMU_SOCCLK:
+			dpm_table = &dpm_context->dpm_tables.soc_table;
+			break;
+		case SMU_FCLK:
+			dpm_table = &dpm_context->dpm_tables.fclk_table;
+			break;
+		case SMU_GL2CLK:
+			dpm_table = &dpm_context->dpm_tables.gl2_table;
+			break;
+		case SMU_VCLK:
+			dpm_table = &dpm_context->dpm_tables.vclk_table;
+			break;
+		case SMU_DCLK:
+			dpm_table = &dpm_context->dpm_tables.dclk_table;
+			break;
+		default:
+			dpm_table = NULL;
+			break;
+		}
+
+		if (dpm_table && dpm_table->count > 0) {
+			min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+			max_clk = SMU_DPM_TABLE_MAX(dpm_table);
+
+			/* a zero endpoint means the table isn't populated yet */
+			if (min_clk && max_clk) {
+				if (min)
+					*min = min_clk;
+				if (max)
+					*max = max_clk;
+				return 0;
+			}
+		}
+	}
+
+	/* Fall back to pptable */
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		min_clk = pptable->MinGfxclkFrequency;
+		max_clk = pptable->MaxGfxclkFrequency;
+		break;
+	case SMU_FCLK:
+		min_clk = pptable->MinFclkFrequency;
+		max_clk = pptable->MaxFclkFrequency;
+		break;
+	case SMU_GL2CLK:
+		min_clk = pptable->MinGl2clkFrequency;
+		max_clk = pptable->MaxGl2clkFrequency;
+		break;
+	case SMU_MCLK:
+	case SMU_UCLK:
+		/* table is sorted ascending: first entry min, last entry max */
+		min_clk = pptable->UclkFrequencyTable[0];
+		max_clk = pptable->UclkFrequencyTable[ARRAY_SIZE(pptable->UclkFrequencyTable) - 1];
+		break;
+	case SMU_SOCCLK:
+		min_clk = pptable->SocclkFrequency;
+		max_clk = pptable->SocclkFrequency;
+		break;
+	case SMU_VCLK:
+		min_clk = pptable->VclkFrequency;
+		max_clk = pptable->VclkFrequency;
+		break;
+	case SMU_DCLK:
+		min_clk = pptable->DclkFrequency;
+		max_clk = pptable->DclkFrequency;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (min)
+		*min = min_clk;
+	if (max)
+		*max = max_clk;
+
+	return 0;
+}
+
+/*
+ * Populate the cached DPM tables from the driver pptable. Fine-grained
+ * clocks (gfx/fclk/gl2) get a two-level [min, max] table when their DPM
+ * feature is enabled; uclk uses the discrete pptable levels; socclk,
+ * vclk and dclk expose a single boot-time value.
+ *
+ * Fix vs. original: the gl2clk table never set clk_type -- every sibling
+ * table does, so SMU_GL2CLK is now assigned for consistency.
+ */
+static int smu_v15_0_8_set_dpm_table(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_dpm_table *dpm_table;
+	PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+	int i, ret;
+	uint32_t gfxclkmin, gfxclkmax;
+
+	/* gfxclk dpm table setup - fine-grained */
+	dpm_table = &dpm_context->dpm_tables.gfx_table;
+	dpm_table->clk_type = SMU_GFXCLK;
+	dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+		ret = smu_v15_0_8_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
+							&gfxclkmin, &gfxclkmax);
+		if (ret)
+			return ret;
+
+		dpm_table->count = 2;
+		dpm_table->dpm_levels[0].value = gfxclkmin;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->dpm_levels[1].value = gfxclkmax;
+		dpm_table->dpm_levels[1].enabled = true;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
+		dpm_table->dpm_levels[0].enabled = true;
+	}
+
+	/* fclk dpm table setup - fine-grained */
+	dpm_table = &dpm_context->dpm_tables.fclk_table;
+	dpm_table->clk_type = SMU_FCLK;
+	dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+		dpm_table->count = 2;
+		dpm_table->dpm_levels[0].value = pptable->MinFclkFrequency;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->dpm_levels[1].value = pptable->MaxFclkFrequency;
+		dpm_table->dpm_levels[1].enabled = true;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = pptable->MinFclkFrequency;
+		dpm_table->dpm_levels[0].enabled = true;
+	}
+
+	/* gl2clk dpm table setup - fine-grained */
+	dpm_table = &dpm_context->dpm_tables.gl2_table;
+	dpm_table->clk_type = SMU_GL2CLK;
+	dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GL2CLK_BIT)) {
+		dpm_table->count = 2;
+		dpm_table->dpm_levels[0].value = pptable->MinGl2clkFrequency;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->dpm_levels[1].value = pptable->MaxGl2clkFrequency;
+		dpm_table->dpm_levels[1].enabled = true;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = pptable->MinGl2clkFrequency;
+		dpm_table->dpm_levels[0].enabled = true;
+	}
+
+	/* uclk dpm table setup - discrete levels */
+	dpm_table = &dpm_context->dpm_tables.uclk_table;
+	dpm_table->clk_type = SMU_UCLK;
+	dpm_table->flags = 0;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+		dpm_table->count = ARRAY_SIZE(pptable->UclkFrequencyTable);
+		for (i = 0; i < dpm_table->count; ++i) {
+			dpm_table->dpm_levels[i].value = pptable->UclkFrequencyTable[i];
+			dpm_table->dpm_levels[i].enabled = true;
+		}
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = pptable->UclkFrequencyTable[0];
+		dpm_table->dpm_levels[0].enabled = true;
+	}
+
+	/* socclk dpm table setup - single boot-time value */
+	dpm_table = &dpm_context->dpm_tables.soc_table;
+	dpm_table->clk_type = SMU_SOCCLK;
+	dpm_table->flags = 0;
+	dpm_table->count = 1;
+	dpm_table->dpm_levels[0].value = pptable->SocclkFrequency;
+	dpm_table->dpm_levels[0].enabled = true;
+
+	/* vclk dpm table setup - single boot-time value */
+	dpm_table = &dpm_context->dpm_tables.vclk_table;
+	dpm_table->clk_type = SMU_VCLK;
+	dpm_table->flags = 0;
+	dpm_table->count = 1;
+	dpm_table->dpm_levels[0].value = pptable->VclkFrequency;
+	dpm_table->dpm_levels[0].enabled = true;
+
+	/* dclk dpm table setup - single boot-time value */
+	dpm_table = &dpm_context->dpm_tables.dclk_table;
+	dpm_table->clk_type = SMU_DCLK;
+	dpm_table->flags = 0;
+	dpm_table->count = 1;
+	dpm_table->dpm_levels[0].value = pptable->DclkFrequency;
+	dpm_table->dpm_levels[0].enabled = true;
+
+	return 0;
+}
+
+/*
+ * setup_pptable callback. No VBIOS pptable exists for this part yet;
+ * only the thermal controller type is (un)set here.
+ */
+static int smu_v15_0_8_setup_pptable(struct smu_context *smu)
+{
+	struct smu_table_context *table_context = &smu->smu_table;
+
+	/* TODO: PPTable is not available.
+	 * 1) Find an alternate way to get 'PPTable values' here.
+	 * 2) Check if there is SW CTF
+	 */
+	table_context->thermal_controller_type = 0;
+
+	return 0;
+}
+
+/*
+ * Check whether MP1 firmware is alive: firmware sets the
+ * INTERRUPTS_ENABLED flag once it is ready to take messages.
+ * Returns 0 when ready, -EIO otherwise.
+ */
+static int smu_v15_0_8_check_fw_status(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t mp1_fw_flags;
+
+	/* NOTE(review): the & 0xffffffff mask on a 32-bit offset is a no-op */
+	mp1_fw_flags = RREG32_PCIE(MP1_Public |
+				   (smnMP1_FIRMWARE_FLAGS_15_0_8 & 0xffffffff));
+
+	if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+	    MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+		return 0;
+
+	return -EIO;
+}
+
+/*
+ * Fetch the PMFW static metrics table into smu_table->metrics_table.
+ *
+ * NOTE(review): the copy length is the SMU_TABLE_SMU_METRICS size --
+ * assumes StaticMetricsTable_t fits within (shares the allocation of)
+ * the regular metrics table; confirm against the driver_if header.
+ */
+static int smu_v15_0_8_get_static_metrics_table(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+	struct smu_table *table = &smu_table->driver_table;
+	int ret;
+
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+	if (ret) {
+		dev_err(smu->adev->dev,
+			"Failed to export static metrics table!\n");
+		return ret;
+	}
+
+	/* PMFW wrote the table via DMA; invalidate HDP before reading */
+	amdgpu_hdp_invalidate(smu->adev, NULL);
+
+	return smu_cmn_vram_cpy(smu, smu_table->metrics_table,
+				table->cpu_addr, table_size);
+}
+
+/*
+ * Populate adev->fru_info (allocating it on first use) from the product
+ * information carried in the static metrics table. All strings are
+ * bounded copies via strscpy().
+ */
+static int smu_v15_0_8_fru_get_product_info(struct smu_context *smu,
+					    StaticMetricsTable_t *static_metrics)
+{
+	struct amdgpu_fru_info *fru_info;
+	struct amdgpu_device *adev = smu->adev;
+
+	if (!adev->fru_info) {
+		adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+		if (!adev->fru_info)
+			return -ENOMEM;
+	}
+
+	fru_info = adev->fru_info;
+	strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber,
+		sizeof(fru_info->product_number));
+	strscpy(fru_info->product_name, static_metrics->ProductInfo.Name,
+		sizeof(fru_info->product_name));
+	strscpy(fru_info->serial, static_metrics->ProductInfo.Serial,
+		sizeof(fru_info->serial));
+	strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName,
+		sizeof(fru_info->manufacturer_name));
+	strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId,
+		sizeof(fru_info->fru_id));
+
+	return 0;
+}
+
+/*
+ * Publish the maximum XGMI link speed/width from static metrics to the
+ * XGMI layer. NOTE(review): the "amgpu_" spelling matches the existing
+ * helper declaration -- do not rename here.
+ */
+static void smu_v15_0_8_init_xgmi_data(struct smu_context *smu,
+				       StaticMetricsTable_t *static_metrics)
+{
+	uint16_t max_speed;
+	uint8_t max_width;
+
+	max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+	max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+	amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
+}
+
+/*
+ * smu_v15_0_8_set_driver_pptable - one-time init of the driver pptable
+ * from the PMFW static metrics table.
+ *
+ * On first call (pptable->init false) this fetches the static metrics
+ * table and metrics version from the FW, converts Q10 fixed-point
+ * limits/frequencies to integers, records per-die serial numbers as
+ * device UIDs, fills FRU product info, and caches PPT1 limits, PLDM
+ * version, board voltage and XGMI link caps before marking the pptable
+ * initialized. Subsequent calls are no-ops.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int smu_v15_0_8_set_driver_pptable(struct smu_context *smu)
+{
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret, i, n;
+ uint32_t table_version;
+
+ if (!pptable->init) {
+ ret = smu_v15_0_8_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
+ /* FW reports limits/frequencies in Q10 fixed point; round to int */
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGfxclkFrequency);
+ pptable->MaxFclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxFclkFrequency);
+ pptable->MinFclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinFclkFrequency);
+ pptable->MaxGl2clkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGl2clkFrequency);
+ pptable->MinGl2clkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGl2clkFrequency);
+
+ for (i = 0; i < ARRAY_SIZE(static_metrics->UclkFrequencyTable); ++i)
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]);
+
+ pptable->SocclkFrequency = SMUQ10_ROUND(static_metrics->SocclkFrequency);
+ pptable->LclkFrequency = SMUQ10_ROUND(static_metrics->LclkFrequency);
+ pptable->VclkFrequency = SMUQ10_ROUND(static_metrics->VclkFrequency);
+ pptable->DclkFrequency = SMUQ10_ROUND(static_metrics->DclkFrequency);
+
+ pptable->CTFLimitMID = SMUQ10_ROUND(static_metrics->CTFLimit_MID);
+ pptable->CTFLimitAID = SMUQ10_ROUND(static_metrics->CTFLimit_AID);
+ pptable->CTFLimitXCD = SMUQ10_ROUND(static_metrics->CTFLimit_XCD);
+ pptable->CTFLimitHBM = SMUQ10_ROUND(static_metrics->CTFLimit_HBM);
+ pptable->ThermalLimitMID = SMUQ10_ROUND(static_metrics->ThermalLimit_MID);
+ pptable->ThermalLimitAID = SMUQ10_ROUND(static_metrics->ThermalLimit_AID);
+ pptable->ThermalLimitXCD = SMUQ10_ROUND(static_metrics->ThermalLimit_XCD);
+ pptable->ThermalLimitHBM = SMUQ10_ROUND(static_metrics->ThermalLimit_HBM);
+
+ /* use MID0 serial number by default */
+ pptable->PublicSerialNumberMID =
+ static_metrics->PublicSerialNumber_MID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumberMID);
+ pptable->PublicSerialNumberAID =
+ static_metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumberXCD =
+ static_metrics->PublicSerialNumber_XCD[0];
+ /* Register every MID/AID/XCD serial number as a device UID */
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_MID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_MID, i,
+ static_metrics->PublicSerialNumber_MID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
+ ret = smu_v15_0_8_fru_get_product_info(smu, static_metrics);
+ if (ret)
+ return ret;
+ pptable->PPT1Max = static_metrics->PPT1Max;
+ pptable->PPT1Min = static_metrics->PPT1Min;
+ pptable->PPT1Default = static_metrics->PPT1Default;
+
+ /* all-ones means the FW did not report a PLDM version */
+ if (static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ smu_v15_0_8_init_xgmi_data(smu, static_metrics);
+ pptable->init = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize the driver pptable from FW data, then build the default
+ * DPM tables from it. Returns the first failure, 0 otherwise.
+ */
+static int smu_v15_0_8_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret = smu_v15_0_8_set_driver_pptable(smu);
+
+ if (ret)
+ return ret;
+
+ return smu_v15_0_8_set_dpm_table(smu);
+}
+
+/*
+ * smu_v15_0_8_irq_process - handle the MP1 SMU-to-host interrupt.
+ *
+ * ACKs the SW interrupt in MP1_SMN_IH_SW_INT_CTRL, then dispatches on
+ * the context id carried in src_data[0]. Thermal-throttling events
+ * bump the throttle counter and, when logging is enabled and not
+ * rate-limited, stash the throttle status from src_data[1] and kick
+ * the logging worker. Always returns 0.
+ */
+static int smu_v15_0_8_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_15_0_power_context *power_context = smu_power->power_context;
+ uint32_t client_id = entry->client_id;
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t src_id = entry->src_id;
+ uint32_t data;
+
+ if (client_id == SOC_V1_0_IH_CLIENTID_MP1) {
+ if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+ /*
+ * ctxid is used to distinguish different events for SMCToHost
+ * interrupt.
+ */
+ switch (ctxid) {
+ case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
+ /*
+ * Increment the throttle interrupt counter
+ */
+ atomic64_inc(&smu->throttle_int_counter);
+
+ if (!atomic_read(&adev->throttling_logging_enabled))
+ return 0;
+
+ /* This uses the new method which fixes the
+ * incorrect throttling status reporting
+ * through metrics table. For older FWs,
+ * it will be ignored.
+ */
+ if (__ratelimit(&adev->throttling_logging_rs)) {
+ atomic_set(
+ &power_context->throttle_status,
+ entry->src_data[1]);
+ schedule_work(&smu->throttling_logging_work);
+ }
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * smu_v15_0_8_set_irq_state - mask/unmask the MP1 SW interrupt.
+ *
+ * DISABLE sets INT_MASK in MP1_SMN_IH_SW_INT_CTRL. ENABLE first
+ * clears the pending SW interrupt (ID 0xFE, VALID=0) and then clears
+ * INT_MASK. Other states are ignored. Always returns 0.
+ */
+static int smu_v15_0_8_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* IRQ source callbacks for the MP1 SMU-to-host software interrupt. */
+static const struct amdgpu_irq_src_funcs smu_v15_0_8_irq_funcs = {
+ .set = smu_v15_0_8_set_irq_state,
+ .process = smu_v15_0_8_irq_process,
+};
+
+/*
+ * Register the MP1 SMU-to-host interrupt source with the IH layer.
+ * SR-IOV VFs skip registration entirely.
+ */
+static int smu_v15_0_8_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v15_0_8_irq_funcs;
+
+ return amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_MP1,
+ IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+}
+
+/* Best-effort notification to PMFW that the driver is going away. */
+static int smu_v15_0_8_notify_unload(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* Skip during reset: the FW connection is being recycled anyway */
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ dev_dbg(adev->dev, "Notify PMFW about driver unload");
+ /* Ignore return, just intimate FW that driver is not going to be there */
+ smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
+ return 0;
+}
+
+
+/*
+ * Enable or disable SMU features. On disable, only notify the FW of
+ * the pending unload (always reported as success); SR-IOV VFs do
+ * nothing in either direction.
+ */
+static int smu_v15_0_8_system_features_control(struct smu_context *smu,
+ bool enable)
+{
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!enable) {
+ smu_v15_0_8_notify_unload(smu);
+ return 0;
+ }
+
+ return smu_v15_0_system_features_control(smu, enable);
+}
+
+/**
+ * smu_v15_0_8_get_enabled_mask - query the enabled SMU feature bits
+ * @smu: SMU context
+ * @feature_mask: output feature-bit structure (must be non-NULL)
+ *
+ * Sends SMU_MSG_GetEnabledSmuFeatures requesting two 32-bit response
+ * words and unpacks them into @feature_mask via
+ * smu_feature_bits_from_arr32() over SMU_FEATURE_NUM_DEFAULT bits.
+ *
+ * NOTE(review): an earlier comment claimed all 128 feature bits arrive
+ * in out_args[0..3], but num_out_args is 2 here — confirm the message
+ * ABI and widen the request if the FW really returns four words.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int smu_v15_0_8_get_enabled_mask(struct smu_context *smu,
+ struct smu_feature_bits *feature_mask)
+{
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_GetEnabledSmuFeatures,
+ .num_args = 0,
+ .num_out_args = 2,
+ };
+ int ret;
+
+ if (!feature_mask)
+ return -EINVAL;
+
+ ret = smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
+
+ if (ret)
+ return ret;
+
+ smu_feature_bits_from_arr32(feature_mask, args.out_args,
+ SMU_FEATURE_NUM_DEFAULT);
+
+ return 0;
+}
+
+/*
+ * DPM counts as running when the v15.0.8 DPM feature bits are all set
+ * in the FW-reported enabled mask; any query failure reads as "off".
+ */
+static bool smu_v15_0_8_is_dpm_running(struct smu_context *smu)
+{
+ struct smu_feature_bits enabled;
+
+ if (smu_v15_0_8_get_enabled_mask(smu, &enabled))
+ return false;
+
+ return smu_feature_bits_test_mask(&enabled,
+ smu_v15_0_8_dpm_features.bits);
+}
+
+/*
+ * smu_v15_0_8_get_pm_metrics - export raw PM metrics with a common header.
+ * @smu: SMU context
+ * @metrics: destination buffer, laid out as struct amdgpu_pm_metrics
+ * @max_size: size of @metrics in bytes
+ *
+ * Fetches a fresh (uncached) metrics table directly into the caller's
+ * data area, then fills the common header with the MP1 IP discovery
+ * version, PMFW version, metrics table version and total size.
+ *
+ * Returns the number of bytes written, -EINVAL on bad arguments, or
+ * -EOVERFLOW when @max_size cannot hold header + table.
+ */
+static ssize_t smu_v15_0_8_get_pm_metrics(struct smu_context *smu,
+ void *metrics, size_t max_size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_pm_metrics *pm_metrics = (struct amdgpu_pm_metrics *)metrics;
+ uint32_t table_version = smu_table->tables[SMU_TABLE_SMU_METRICS].version;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ uint32_t pmfw_version;
+ int ret;
+
+ if (!pm_metrics || !max_size)
+ return -EINVAL;
+
+ if (max_size < (table_size + sizeof(pm_metrics->common_header)))
+ return -EOVERFLOW;
+
+ /* Don't use cached metrics data */
+ ret = smu_v15_0_8_get_metrics_table_internal(smu, 0, pm_metrics->data);
+ if (ret)
+ return ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+ memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header));
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
+ pm_metrics->common_header.pmfw_version = pmfw_version;
+ pm_metrics->common_header.pmmetrics_version = table_version;
+ pm_metrics->common_header.structure_size =
+ sizeof(pm_metrics->common_header) + table_size;
+
+ return pm_metrics->common_header.structure_size;
+}
+
+/*
+ * smu_v15_0_8_mode2_reset - trigger a mode-2 reset via PMFW.
+ *
+ * Sends GfxDeviceDriverReset asynchronously while holding the message
+ * lock, waits 200ms for the reset itself, then polls for the FW ACK,
+ * retrying up to 10 times on timeout with a short sleep between polls.
+ * Any non-timeout error aborts immediately; a persistent timeout falls
+ * out of the loop with -ETIME.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int smu_v15_0_8_mode2_reset(struct smu_context *smu)
+{
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+ int ret = 0;
+
+ mutex_lock(&ctl->lock);
+
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2);
+
+ if (ret)
+ goto out;
+
+ /* Reset takes a bit longer, wait for 200ms. */
+ msleep(200);
+
+ dev_dbg(adev->dev, "wait for reset ack\n");
+ do {
+ ret = smu_msg_wait_response(ctl, 0);
+ /* Wait a bit more time for getting ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret)
+ goto out;
+
+ } while (ret == -ETIME && timeout);
+
+out:
+ mutex_unlock(&ctl->lock);
+
+ if (ret)
+ dev_err(adev->dev, "failed to send mode2 reset, error code %d",
+ ret);
+
+ return ret;
+}
+
+/*
+ * Report which temperature metric tables this node provides: GPU-board
+ * metrics are always available, baseboard metrics only on the primary
+ * XGMI node (physical node 0).
+ */
+static bool smu_v15_0_8_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ if (type == SMU_TEMP_METRIC_GPUBOARD)
+ return true;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD)
+ return smu->adev->gmc.xgmi.physical_node_id == 0;
+
+ return false;
+}
+
+/*
+ * Translate the PMFW SystemMetricsTable baseboard (UBB) temperature
+ * readings into the driver's baseboard_temp_metrics layout. Pure
+ * field-by-field copy keyed by the SYSTEM_TEMP_* indices; no unit
+ * conversion is applied here.
+ */
+static void smu_v15_0_8_fill_baseboard_temp_metrics(
+ struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics,
+ const SystemMetricsTable_t *metrics)
+{
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ baseboard_temp_metrics->system_temp_ubb_fpga =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA];
+ baseboard_temp_metrics->system_temp_ubb_front =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FRONT];
+ baseboard_temp_metrics->system_temp_ubb_back =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_BACK];
+ baseboard_temp_metrics->system_temp_ubb_oam7 =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_OAM7];
+ baseboard_temp_metrics->system_temp_ubb_ibc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_IBC];
+ baseboard_temp_metrics->system_temp_ubb_ufpga =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_UFPGA];
+ baseboard_temp_metrics->system_temp_ubb_oam1 =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_OAM1];
+ baseboard_temp_metrics->system_temp_oam_0_1_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_0_1_HSC];
+ baseboard_temp_metrics->system_temp_oam_2_3_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_2_3_HSC];
+ baseboard_temp_metrics->system_temp_oam_4_5_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_4_5_HSC];
+ baseboard_temp_metrics->system_temp_oam_6_7_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_6_7_HSC];
+ baseboard_temp_metrics->system_temp_ubb_fpga_0v72_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA_0V72_VR];
+ baseboard_temp_metrics->system_temp_ubb_fpga_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA_3V3_VR];
+ baseboard_temp_metrics->system_temp_retimer_0_1_2_3_1v2_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR];
+ baseboard_temp_metrics->system_temp_retimer_4_5_6_7_1v2_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR];
+ baseboard_temp_metrics->system_temp_retimer_0_1_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_0_1_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_4_5_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_4_5_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_2_3_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_2_3_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_6_7_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_6_7_0V9_VR];
+ baseboard_temp_metrics->system_temp_oam_0_1_2_3_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR];
+ baseboard_temp_metrics->system_temp_oam_4_5_6_7_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR];
+ baseboard_temp_metrics->system_temp_ibc_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_IBC_HSC];
+ baseboard_temp_metrics->system_temp_ibc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_IBC];
+}
+
+/*
+ * Translate the PMFW SystemMetricsTable node- and VR-level temperature
+ * readings into the driver's gpuboard_temp_metrics layout. Pure
+ * field-by-field copy keyed by NODE_TEMP_* / SVI_PLANE_* indices; no
+ * unit conversion is applied here.
+ */
+static void smu_v15_0_8_fill_gpuboard_temp_metrics(
+ struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics,
+ const SystemMetricsTable_t *metrics)
+{
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ gpuboard_temp_metrics->node_temp_retimer =
+ metrics->NodeTemperatures[NODE_TEMP_RETIMER];
+ gpuboard_temp_metrics->node_temp_ibc =
+ metrics->NodeTemperatures[NODE_TEMP_IBC_TEMP];
+ gpuboard_temp_metrics->node_temp_ibc_2 =
+ metrics->NodeTemperatures[NODE_TEMP_IBC_2_TEMP];
+ gpuboard_temp_metrics->node_temp_vdd18_vr =
+ metrics->NodeTemperatures[NODE_TEMP_VDD18_VR_TEMP];
+ gpuboard_temp_metrics->node_temp_04_hbm_b_vr =
+ metrics->NodeTemperatures[NODE_TEMP_04_HBM_B_VR_TEMP];
+ gpuboard_temp_metrics->node_temp_04_hbm_d_vr =
+ metrics->NodeTemperatures[NODE_TEMP_04_HBM_D_VR_TEMP];
+
+ gpuboard_temp_metrics->vr_temp_vddcr_socio_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_SOCIO_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_socio_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_SOCIO_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_x0 =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_X0_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_x1 =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_X1_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_04_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_04_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_04_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_04_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_11_gta_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_11_GTA_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_11_gta_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_11_GTA_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075_gta_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_GTA_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075_gta_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_GTA_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_ucie =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_UCIE_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieaa =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAA_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieam_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAM_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieam_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAM_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075 =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_TEMP];
+}
+
+/*
+ * smu_v15_0_8_get_temp_metrics - snapshot board temperature metrics.
+ *
+ * Refreshes the PMFW system metrics table, converts it into the
+ * requested driver-table format (GPU board or baseboard), bumps that
+ * driver table's cache timestamp and copies the result into @table.
+ *
+ * Returns the size of the filled structure, -EINVAL for an unknown
+ * @type, or a negative error from the table refresh.
+ */
+static ssize_t smu_v15_0_8_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type,
+ void *table)
+{
+ struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics;
+ struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ ret = smu_v15_0_8_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (type) {
+ case SMU_TEMP_METRIC_GPUBOARD:
+ gpuboard_temp_metrics =
+ (struct smu_v15_0_8_gpuboard_temp_metrics *)
+ smu_driver_table_ptr(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_v15_0_8_fill_gpuboard_temp_metrics(gpuboard_temp_metrics,
+ metrics);
+ memcpy(table, gpuboard_temp_metrics, sizeof(*gpuboard_temp_metrics));
+ return sizeof(*gpuboard_temp_metrics);
+ case SMU_TEMP_METRIC_BASEBOARD:
+ baseboard_temp_metrics =
+ (struct smu_v15_0_8_baseboard_temp_metrics *)
+ smu_driver_table_ptr(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_v15_0_8_fill_baseboard_temp_metrics(baseboard_temp_metrics,
+ metrics);
+ memcpy(table, baseboard_temp_metrics, sizeof(*baseboard_temp_metrics));
+ return sizeof(*baseboard_temp_metrics);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * smu_v15_0_8_get_gpu_metrics - build the versioned gpu_metrics blob.
+ *
+ * Pulls a fresh metrics table from the PMFW and translates it into the
+ * driver-table gpu_metrics structure: temperatures (socket, HBM
+ * stacks, MID/AID/XCD dies), power/energy, activity counters,
+ * per-domain clocks, throttler residencies and XGMI link state. Q10
+ * fixed-point FW values are rounded via SMUQ10_ROUND. On success
+ * *table points at the filled structure and its size is returned.
+ */
+static ssize_t smu_v15_0_8_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_v15_0_8_gpu_metrics *gpu_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j, idx;
+ uint32_t aid_mask = adev->aid_mask;
+ /* NOTE(review): mid_mask is seeded from aid_mask — confirm the MID
+ * instance population really mirrors the AID mask on this ASIC.
+ */
+ uint32_t mid_mask = adev->aid_mask;
+ MetricsTable_t *metrics;
+
+ ret = smu_v15_0_8_get_metrics_table_internal(smu, 1, NULL);
+ if (ret)
+ return ret;
+
+ metrics = (MetricsTable_t *)smu_table->metrics_table;
+ gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+ SMU_DRIVER_TABLE_GPU_METRICS);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature);
+
+ /* Per-HBM stack temperatures */
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+ /* NOTE(review): mask is u64 — %lld prints it signed; %llu
+ * would be the matching specifier.
+ */
+ if (unlikely(hweight64(mask)/4 > SMU_15_0_8_MAX_HBM_STACKS))
+ dev_warn(adev->dev, "Invalid umc mask %lld\n", mask);
+ else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ SMUQ10_ROUND(metrics->HbmTemperature[stack_idx]);
+ }
+ }
+ }
+
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc = SMUQ10_ROUND(metrics->MaxVrTemperature);
+ /* MID, AID, XCD temperatures */
+ idx = 0;
+ for_each_inst(i, mid_mask) {
+ gpu_metrics->temperature_mid[idx] = SMUQ10_ROUND(metrics->MidTemperature[i]);
+ idx++;
+ }
+
+ idx = 0;
+ for_each_inst(i, aid_mask) {
+ gpu_metrics->temperature_aid[idx] = SMUQ10_ROUND(metrics->AidTemperature[i]);
+ idx++;
+ }
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->temperature_xcd[i] = SMUQ10_ROUND(metrics->XcdTemperature[xcc_id]);
+ }
+ /* Power */
+ gpu_metrics->curr_socket_power = SMUQ10_ROUND(metrics->SocketPower);
+
+ gpu_metrics->average_gfx_activity = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ gpu_metrics->mem_max_bandwidth = SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0) {
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ }
+ }
+
+ /* Per-MID clocks */
+ idx = 0;
+ for_each_inst(i, mid_mask) {
+ gpu_metrics->current_socclk[idx] = SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ idx++;
+ }
+
+ /* Per-VCN clocks */
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] = SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ gpu_metrics->current_dclk0[i] = SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ }
+ }
+
+ /* Per-AID clocks */
+ idx = 0;
+ for_each_inst(i, aid_mask) {
+ gpu_metrics->current_uclk[idx] = SMUQ10_ROUND(metrics->UclkFrequency[i]);
+ idx++;
+ }
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+ gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+ gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+ gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+ gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+ gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+ /* Map physical link status into extended (logical) link slots */
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[j] = ret;
+ }
+
+ gpu_metrics->xgmi_read_data_acc = SMUQ10_ROUND(metrics->XgmiReadBandwidthAcc);
+ gpu_metrics->xgmi_write_data_acc = SMUQ10_ROUND(metrics->XgmiWriteBandwidthAcc);
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_ppt_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_thm_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->gfx_low_utilization_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+
+ gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
+ gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
+
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+ *table = gpu_metrics;
+
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPU_METRICS);
+
+ return sizeof(*gpu_metrics);
+}
+
+/* Expose the cached MID0 public serial number as the device unique id. */
+static void smu_v15_0_8_get_unique_id(struct smu_context *smu)
+{
+ PPTable_t *pptable = (PPTable_t *)smu->smu_table.driver_pptable;
+
+ smu->adev->unique_id = pptable->PublicSerialNumberMID;
+}
+
+/*
+ * Report power limits: the current PPT limit is queried from the FW,
+ * default/max come from the cached pptable's MaxSocketPowerLimit and
+ * min is fixed at 0. Every out pointer is optional (may be NULL).
+ */
+static int smu_v15_0_8_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
+{
+ PPTable_t *pptable = (PPTable_t *)smu->smu_table.driver_pptable;
+ uint32_t limit = 0;
+
+ if (smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &limit)) {
+ dev_err(smu->adev->dev, "Couldn't get PPT limit");
+ return -EINVAL;
+ }
+
+ if (current_power_limit)
+ *current_power_limit = limit;
+ if (default_power_limit)
+ *default_power_limit = pptable->MaxSocketPowerLimit;
+ if (max_power_limit)
+ *max_power_limit = pptable->MaxSocketPowerLimit;
+ if (min_power_limit)
+ *min_power_limit = 0;
+
+ return 0;
+}
+
+/* Seed the UMD pstate GFXCLK/UCLK ranges from the default DPM tables. */
+static int smu_v15_0_8_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_dpm_table *gfx = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem = &dpm_context->dpm_tables.uclk_table;
+
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx);
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem);
+
+ return 0;
+}
+
+/*
+ * Program the GFX soft clock limits (only the low 16 bits of each value
+ * are sent to the FW). The soft max is written before the soft min,
+ * matching the established message ordering.
+ */
+static int smu_v15_0_8_set_gfx_soft_freq_limited_range(struct smu_context *smu,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ max & 0xffff, NULL);
+
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
+ min & 0xffff, NULL);
+}
+
+/*
+ * smu_v15_0_8_set_performance_level - apply a forced DPM level.
+ *
+ * AUTO restores the default GFXCLK range and UCLK max (skipping the FW
+ * messages when already at defaults) and clears any custom pstate
+ * level. MANUAL is a no-op that merely permits subsequent soft-limit
+ * writes. Determinism mode and all other levels are unsupported.
+ *
+ * Fix: @ret was previously declared uninitialized and read at the
+ * "if (ret) goto out;" / "return ret;" when the AUTO path made no
+ * changes (both ranges already at defaults) — an uninitialized-read.
+ * Initialize it to 0 and drop the redundant post-UCLK check.
+ *
+ * Returns 0 on success, -EOPNOTSUPP for unsupported levels, or a
+ * negative error from the FW messages.
+ */
+static int smu_v15_0_8_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_15_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ /* Determinism not supported on SMU v15.0.8 */
+ ret = -EOPNOTSUPP;
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ /* Restore GFXCLK to default range */
+ if ((SMU_DPM_TABLE_MIN(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.min) ||
+ (SMU_DPM_TABLE_MAX(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.max)) {
+ ret = smu_v15_0_8_set_gfx_soft_freq_limited_range(
+ smu, SMU_DPM_TABLE_MIN(gfx_table),
+ SMU_DPM_TABLE_MAX(gfx_table));
+ if (ret)
+ break;
+
+ pstate_table->gfxclk_pstate.curr.min =
+ SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ }
+
+ /* Restore UCLK to default max */
+ if (SMU_DPM_TABLE_MAX(uclk_table) !=
+ pstate_table->uclk_pstate.curr.max) {
+ /* Min UCLK is not expected to be changed */
+ ret = smu_v15_0_set_soft_freq_limited_range(smu,
+ SMU_UCLK, 0,
+ SMU_DPM_TABLE_MAX(uclk_table),
+ false);
+ if (ret)
+ break;
+
+ pstate_table->uclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(uclk_table);
+ }
+
+ smu_cmn_reset_custom_level(smu);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * smu_v15_0_8_set_soft_freq_limited_range - set soft clock limits.
+ *
+ * Supports GFXCLK/SCLK (min and max) and UCLK (max only; @min is not
+ * programmed for UCLK). Only permitted in manual perf level, and @min
+ * must be strictly below @max. Cached pstate ranges are updated on
+ * success and redundant FW writes are skipped. @automatic is not used
+ * by this implementation.
+ */
+static int smu_v15_0_8_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max,
+ bool automatic)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret = 0;
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
+ clk_type != SMU_UCLK)
+ return -EINVAL;
+
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ if (min >= max) {
+ dev_err(smu->adev->dev,
+ "Minimum clk should be less than the maximum allowed clock\n");
+ return -EINVAL;
+ }
+
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK) {
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v15_0_8_set_gfx_soft_freq_limited_range(smu, min,
+ max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+
+ if (clk_type == SMU_UCLK) {
+ if (max == pstate_table->uclk_pstate.curr.max)
+ return 0;
+
+ /* Only the max is programmed for UCLK; min stays at 0 */
+ ret = smu_v15_0_set_soft_freq_limited_range(smu, SMU_UCLK, 0,
+ max, false);
+ if (!ret)
+ pstate_table->uclk_pstate.curr.max = max;
+ }
+
+ return ret;
+}
+
+/*
+ * smu_v15_0_8_od_edit_dpm_table - sysfs OD (overdrive) table editing.
+ *
+ * Only valid in manual perf level. SCLK edits stage custom GFX min/max
+ * (validated against the default DPM range); MCLK edits stage a custom
+ * UCLK max only (min editing unsupported, UCLK DPM must be enabled);
+ * RESTORE re-applies the default ranges and clears the custom level;
+ * COMMIT programs the staged custom ranges to the FW.
+ *
+ * Returns 0 on success, -EINVAL/-EOPNOTSUPP on bad input, -ENOSYS for
+ * unknown commands.
+ */
+static int smu_v15_0_8_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct smu_15_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ uint32_t min_clk, max_clk;
+ int ret;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ /* input[0]: 0 = edit min, 1 = edit max; input[1]: MHz value */
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+ if (input[0] == 0) {
+ if (input[1] < min_clk) {
+ dev_warn(smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1], min_clk);
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.min = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "UCLK_LIMITS setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min UCLK level is not supported");
+ return -EINVAL;
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->uclk_pstate.custom.max =
+ pstate_table->uclk_pstate.curr.max;
+
+ return -EINVAL;
+ }
+
+ pstate_table->uclk_pstate.custom.max = input[1];
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ /* Use the default frequencies for manual mode */
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.uclk_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_UCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ smu_cmn_reset_custom_level(smu);
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ /* Unstaged bounds fall back to the current range */
+ if (!pstate_table->gfxclk_pstate.custom.min)
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+
+ if (!pstate_table->gfxclk_pstate.custom.max)
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+
+ min_clk = pstate_table->gfxclk_pstate.custom.min;
+ max_clk = pstate_table->gfxclk_pstate.custom.max;
+
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ /* Commit UCLK custom range (only max supported) */
+ if (pstate_table->uclk_pstate.custom.max) {
+ min_clk = pstate_table->uclk_pstate.curr.min;
+ max_clk = pstate_table->uclk_pstate.custom.max;
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_UCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+/*
+ * Populate the thermal trip points reported to the SMU core.
+ *
+ * Emergency (shutdown) limits come from the pptable CTF (Critical
+ * Temperature Fault) entries; throttling limits come from the Thermal*
+ * entries.  For the hotspot sensor the worst case (maximum) across the
+ * MID/XCD/AID dies is reported; HBM limits are taken as-is.
+ *
+ * Returns 0 on success.  Under SR-IOV multi-VF mode the range is
+ * deliberately left untouched and 0 is returned.  Returns -EINVAL if
+ * @range is NULL.
+ */
+static int smu_v15_0_8_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ uint32_t max_ctf, max_thm;
+
+ /* No thermal range reporting for VFs in multi-VF mode */
+ if (amdgpu_sriov_multi_vf_mode(smu->adev))
+ return 0;
+
+ if (!range)
+ return -EINVAL;
+
+ /* CTF (Critical Temperature Fault) limits */
+ max_ctf = max3(pptable->CTFLimitMID, pptable->CTFLimitXCD,
+ pptable->CTFLimitAID);
+ range->hotspot_emergency_max = max_ctf * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ range->mem_emergency_max = pptable->CTFLimitHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Thermal throttling limits */
+ max_thm = max3(pptable->ThermalLimitMID, pptable->ThermalLimitXCD,
+ pptable->ThermalLimitAID);
+ range->hotspot_crit_max = max_thm * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ range->mem_crit_max = pptable->ThermalLimitHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+/*
+ * Apply a socket power limit.
+ *
+ * SMU_FAST_PPT_LIMIT (PPT1) is handled locally: the requested value is
+ * range-checked against the pptable PPT1Min/PPT1Max bounds and then
+ * forwarded to firmware via SMU_MSG_SetFastPptLimit.  A zero PPT1Max is
+ * treated as "PPT1 not supported" (-EOPNOTSUPP).  All other limit types
+ * fall through to the generic smu_v15_0 handler.
+ */
+static int smu_v15_0_8_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret;
+
+ if (limit_type == SMU_FAST_PPT_LIMIT) {
+ /* PPT1Max == 0 means the feature is absent in this pptable */
+ if (!pptable->PPT1Max)
+ return -EOPNOTSUPP;
+
+ if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
+ dev_err(smu->adev->dev,
+ "New PPT1 limit (%d) should be between min %d and max %d\n",
+ limit, pptable->PPT1Min, pptable->PPT1Max);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
+ limit, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
+
+ return ret;
+ }
+
+ /* Slow/default PPT limits are handled by the common v15.0 code */
+ return smu_v15_0_set_power_limit(smu, limit_type, limit);
+}
+
+/*
+ * Query a PPT (package power tracking) limit.
+ *
+ * Only SMU_FAST_PPT_LIMIT (PPT1) is implemented here: the min/max/default
+ * values are read straight from the pptable, while the current value is
+ * fetched from firmware via SMU_MSG_GetFastPptLimit.  A zero PPT1Max
+ * indicates PPT1 is unsupported.  Any other limit type (or unknown
+ * level) returns -EOPNOTSUPP.
+ */
+static int smu_v15_0_8_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret = 0;
+
+ if (!ppt_limit)
+ return -EINVAL;
+
+ if (type == SMU_FAST_PPT_LIMIT) {
+ /* PPT1Max == 0 means the feature is absent in this pptable */
+ if (!pptable->PPT1Max)
+ return -EOPNOTSUPP;
+
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = pptable->PPT1Max;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ /* Current value lives in firmware, not the pptable */
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit,
+ ppt_limit);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Get fast PPT limit failed!\n");
+ break;
+ case SMU_PPT_LIMIT_DEFAULT:
+ *ppt_limit = pptable->PPT1Default;
+ break;
+ case SMU_PPT_LIMIT_MIN:
+ *ppt_limit = pptable->PPT1Min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/*
+ * ASIC-specific powerplay callback table for SMU v15.0.8.  Entries mix
+ * local smu_v15_0_8_* implementations with generic smu_v15_0_* and
+ * smu_cmn_* helpers where no ASIC specialization is needed.
+ */
+static const struct pptable_funcs smu_v15_0_8_ppt_funcs = {
+ .init_allowed_features = smu_v15_0_8_init_allowed_features,
+ .set_default_dpm_table = smu_v15_0_8_set_default_dpm_table,
+ .is_dpm_running = smu_v15_0_8_is_dpm_running,
+ .init_smc_tables = smu_v15_0_8_init_smc_tables,
+ .fini_smc_tables = smu_v15_0_8_fini_smc_tables,
+ .init_power = smu_v15_0_init_power,
+ .fini_power = smu_v15_0_fini_power,
+ .check_fw_status = smu_v15_0_8_check_fw_status,
+ .check_fw_version = smu_cmn_check_fw_version,
+ .set_driver_table_location = smu_v15_0_set_driver_table_location,
+ .set_tool_table_location = smu_v15_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v15_0_notify_memory_pool_location,
+ .system_features_control = smu_v15_0_8_system_features_control,
+ .get_enabled_mask = smu_v15_0_8_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .register_irq_handler = smu_v15_0_8_register_irq_handler,
+ .setup_pptable = smu_v15_0_8_setup_pptable,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .wait_for_event = smu_v15_0_wait_for_event,
+ .get_pm_metrics = smu_v15_0_8_get_pm_metrics,
+ .mode2_reset = smu_v15_0_8_mode2_reset,
+ .get_dpm_ultimate_freq = smu_v15_0_8_get_dpm_ultimate_freq,
+ .get_gpu_metrics = smu_v15_0_8_get_gpu_metrics,
+ .get_unique_id = smu_v15_0_8_get_unique_id,
+ .get_power_limit = smu_v15_0_8_get_power_limit,
+ .set_power_limit = smu_v15_0_8_set_power_limit,
+ .get_ppt_limit = smu_v15_0_8_get_ppt_limit,
+ .emit_clk_levels = smu_v15_0_8_emit_clk_levels,
+ .read_sensor = smu_v15_0_8_read_sensor,
+ .populate_umd_state_clk = smu_v15_0_8_populate_umd_state_clk,
+ .set_performance_level = smu_v15_0_8_set_performance_level,
+ .od_edit_dpm_table = smu_v15_0_8_od_edit_dpm_table,
+ .get_thermal_temperature_range = smu_v15_0_8_get_thermal_temperature_range,
+};
+
+/*
+ * Initialize the driver->firmware message control block for this ASIC:
+ * MP1 C2PMSG mailbox register offsets (message, response and four
+ * argument registers), the v1 message protocol ops, the ASIC message
+ * map and a response deadline of 20x the device's usec_timeout.
+ */
+static void smu_v15_0_8_init_msg_ctl(struct smu_context *smu,
+ const struct cmn2asic_msg_mapping *message_map)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+
+ ctl->smu = smu;
+ mutex_init(&ctl->lock);
+ ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_40);
+ ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_41);
+ ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_42);
+ ctl->config.arg_regs[1] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_43);
+ ctl->config.arg_regs[2] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_44);
+ ctl->config.arg_regs[3] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_45);
+ ctl->config.num_arg_regs = 4;
+ ctl->ops = &smu_msg_v1_ops;
+ ctl->default_timeout = adev->usec_timeout * 20;
+ ctl->message_map = message_map;
+}
+
+/* Temperature metrics query callbacks for SMU v15.0.8 */
+static const struct smu_temp_funcs smu_v15_0_8_temp_funcs = {
+ .temp_metrics_is_supported = smu_v15_0_8_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v15_0_8_get_temp_metrics,
+};
+
+/*
+ * Wire all SMU v15.0.8 specifics into the common SMU context: the
+ * callback table, the clock/feature/table maps, the message control
+ * block, the temperature callbacks and the expected driver-interface
+ * version.  Called during early SMU init for matching ASICs.
+ */
+void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v15_0_8_ppt_funcs;
+ smu->clock_map = smu_v15_0_8_clk_map;
+ smu->feature_map = smu_v15_0_8_feature_mask_map;
+ smu->table_map = smu_v15_0_8_table_map;
+ smu_v15_0_8_init_msg_ctl(smu, smu_v15_0_8_message_map);
+ smu->smu_temp.temp_funcs = &smu_v15_0_8_temp_funcs;
+ smu->smc_driver_if_version = SMU15_DRIVER_IF_VERSION_SMU_V15_0_8;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
new file mode 100644
index 000000000000..398ce4482174
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_15_0_8_PPT_H__
+#define __SMU_15_0_8_PPT_H__
+
+/* Instance counts / metric array sizes for the SMU v15.0.8 topology */
+#define SMU_15_0_8_NUM_XGMI_LINKS 8
+#define SMU_15_0_8_MAX_GFX_CLKS 8
+#define SMU_15_0_8_MAX_CLKS 4
+#define SMU_15_0_8_MAX_XCC 8
+#define SMU_15_0_8_MAX_VCN 4
+#define SMU_15_0_8_MAX_JPEG 40
+#define SMU_15_0_8_MAX_AID 2
+#define SMU_15_0_8_MAX_MID 2
+#define SMU_15_0_8_MAX_HBM_STACKS 12
+extern void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu);
+
+/*
+ * Driver-side cache of static board parameters taken from the firmware
+ * pptable: power limits, clock ranges, CTF/thermal limits, serial
+ * numbers and the PPT1 (fast PPT) envelope.
+ * NOTE(review): 'init' presumably flags that the table has been
+ * populated — confirm against the fill site in setup_pptable.
+ */
+typedef struct {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t MaxFclkFrequency;
+ uint32_t MinFclkFrequency;
+ uint32_t MaxGl2clkFrequency;
+ uint32_t MinGl2clkFrequency;
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ /* CTF = Critical Temperature Fault (emergency shutdown) limits */
+ uint32_t CTFLimitMID;
+ uint32_t CTFLimitAID;
+ uint32_t CTFLimitXCD;
+ uint32_t CTFLimitHBM;
+ /* Thermal throttling limits */
+ uint32_t ThermalLimitMID;
+ uint32_t ThermalLimitAID;
+ uint32_t ThermalLimitXCD;
+ uint32_t ThermalLimitHBM;
+ uint64_t PublicSerialNumberMID;
+ uint64_t PublicSerialNumberAID;
+ uint64_t PublicSerialNumberXCD;
+ /* Fast PPT (PPT1) envelope; PPT1Max == 0 means unsupported */
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+ bool init;
+} PPTable_t;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v15.0.8 GPU metrics */
+#define SMU_15_0_8_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hotspot); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mem); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_vrsoc); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_15_0_8_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_MID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mid, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_15_0_8_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_xcd, SMU_15_0_8_MAX_XCC); \
+ SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
+ SMU_MTYPE(U16), curr_socket_power); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_gfx_activity); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_umc_activity); \
+ SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), mem_max_bandwidth); \
+ SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), energy_accumulator); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
+ SMU_MTYPE(U64), system_clock_counter); \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), prochot_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), ppt_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), socket_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), vr_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), hbm_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), gfxclk_lock_status); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), pcie_link_width); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
+ SMU_MTYPE(U16), pcie_link_speed); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_width); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
+ SMU_MTYPE(U16), xgmi_link_speed); \
+ SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), mem_activity_acc); \
+ SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_bandwidth_acc, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U32), pcie_bandwidth_inst, SMU_15_0_8_MAX_MID); \
+ SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_nak_sent_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_nak_rcvd_count_acc); \
+ SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_status, \
+ SMU_15_0_8_NUM_XGMI_LINKS); \
+ SMU_SCALAR(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_read_data_acc); \
+ SMU_SCALAR(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_write_data_acc); \
+ SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
+ SMU_MTYPE(U64), firmware_timestamp); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_15_0_8_MAX_GFX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk, SMU_15_0_8_MAX_AID); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
+ SMU_MUNIT(NONE), SMU_MTYPE(U64), \
+ pcie_lc_perf_other_end_recovery); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_15_0_8_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(NONE), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_15_0_8_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpu_metrics, SMU_15_0_8_METRICS_FIELDS);
+
+/* Maximum temperature sensor counts for system metrics */
+#define SMU_15_0_8_MAX_SYSTEM_TEMP_ENTRIES 32
+#define SMU_15_0_8_MAX_NODE_TEMP_ENTRIES 12
+#define SMU_15_0_8_MAX_VR_TEMP_ENTRIES 22
+
+/* SMU v15.0.8 GPU board temperature metrics */
+#define SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), label_version); \
+ SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), node_id); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_RETIMER), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_retimer); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_ibc); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC_2), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_ibc_2); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_VDD18_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_vdd18_vr); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_B_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_04_hbm_b_vr); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_D_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_04_hbm_d_vr); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_socio_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_socio_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X0), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_x0); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X1), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_x1); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_04_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_04_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_11_gta_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_11_gta_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075_gta_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075_gta_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_UCIE), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_ucie); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieaa); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpuboard_temp_metrics,
+ SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS);
+
+/* SMU v15.0.8 baseboard temperature metrics - ID-based approach */
+#define SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), label_version); \
+ SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), node_id); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FRONT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_front); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_BACK), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_back); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM7), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_oam7); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_ibc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_UFPGA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_ufpga); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM1), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_oam1); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_0_1_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_2_3_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_2_3_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_4_5_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_6_7_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_6_7_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_0V72_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga_0v72_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_0_1_2_3_1v2_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_4_5_6_7_1v2_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_0_1_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_4_5_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_2_3_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_2_3_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_6_7_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_6_7_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_0_1_2_3_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_4_5_6_7_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ibc_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ibc);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_baseboard_temp_metrics,
+ SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS);
+#endif
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 6fd50c2fd20e..006ef585a377 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -405,7 +405,7 @@ static int __smu_msg_v1_ras_filter(struct smu_msg_ctl *ctl,
}
/**
- * smu_msg_proto_v1_send_msg - Complete V1 protocol with all filtering
+ * smu_msg_v1_send_msg - Complete V1 protocol with all filtering
* @ctl: Message control block
* @args: Message arguments
*
@@ -880,7 +880,7 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
- int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
+ int16_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
struct smu_feature_bits feature_mask;
uint32_t features[2];
int i, feature_index;
@@ -1035,6 +1035,31 @@ int smu_cmn_get_smc_version(struct smu_context *smu,
return ret;
}
+/**
+ * smu_cmn_check_fw_version - read and log SMC firmware/interface versions
+ * @smu: SMU context
+ *
+ * Retrieves the firmware interface and version numbers, caches the
+ * firmware version in adev->pm.fw_version and logs (once) both the
+ * driver's and the firmware's interface versions along with the decoded
+ * program/major/minor/debug fields.
+ *
+ * NOTE(review): despite the name, no comparison between
+ * smu->smc_driver_if_version and the firmware's if_version is performed
+ * here — a mismatch is only visible in the log, never rejected.  Confirm
+ * this is intentional for the ASICs that use this as .check_fw_version.
+ *
+ * Return: 0 on success or the error from smu_cmn_get_smc_version().
+ */
+int smu_cmn_check_fw_version(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ int ret;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ /* smu_version layout: [program][major][minor][debug], one byte each */
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+ adev->pm.fw_version = smu_version;
+
+ dev_info_once(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_driver_if_version, if_version,
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+
+ return 0;
+}
+
int smu_cmn_update_table(struct smu_context *smu,
enum smu_table_id table_index,
int argument,
@@ -1079,6 +1104,18 @@ int smu_cmn_update_table(struct smu_context *smu,
return 0;
}
+/**
+ * smu_cmn_vram_cpy - memcpy with RAS fatal-error validation
+ * @smu: SMU context
+ * @dst: destination buffer
+ * @src: source buffer (data originating from VRAM)
+ * @len: number of bytes to copy
+ *
+ * Performs the copy unconditionally, then checks whether a RAS fatal
+ * error (FED) is pending; data copied while the device is in that state
+ * cannot be trusted.  On -EHWPOISON callers must discard @dst.
+ *
+ * Return: 0 on success, -EHWPOISON if a RAS fatal error has occurred.
+ */
+int smu_cmn_vram_cpy(struct smu_context *smu, void *dst, const void *src,
+ size_t len)
+{
+ memcpy(dst, src, len);
+
+ /* Don't trust the copy operation if RAS fatal error happened. */
+ if (amdgpu_ras_get_fed_status(smu->adev))
+ return -EHWPOISON;
+
+ return 0;
+}
+
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
void *watermarks_table = smu->smu_table.watermarks_table;
@@ -1276,6 +1313,16 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
}
}
+/*
+ * Drop any user-provided (overdrive "custom") gfxclk/uclk min/max
+ * overrides; zero means "no custom value set" to the commit path.
+ */
+void smu_cmn_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+}
+
static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
/* Frequencies within 25 MHz are considered equal */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index b7bfddc65fb2..d129907535bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -174,6 +174,9 @@ int smu_cmn_update_table(struct smu_context *smu,
void *table_data,
bool drv2smu);
+int smu_cmn_vram_cpy(struct smu_context *smu, void *dst,
+ const void *src, size_t len);
+
int smu_cmn_write_watermarks_table(struct smu_context *smu);
int smu_cmn_write_pptable(struct smu_context *smu);
@@ -204,9 +207,11 @@ int smu_cmn_print_pcie_levels(struct smu_context *smu,
struct smu_pcie_table *pcie_table,
uint32_t cur_gen, uint32_t cur_lane,
char *buf, int *offset);
+void smu_cmn_reset_custom_level(struct smu_context *smu);
int smu_cmn_dpm_pcie_gen_idx(int gen);
int smu_cmn_dpm_pcie_width_idx(int width);
+int smu_cmn_check_fw_version(struct smu_context *smu);
/*SMU gpu metrics */