author	Dave Airlie <airlied@redhat.com>	2026-03-27 09:30:34 +1000
committer	Dave Airlie <airlied@redhat.com>	2026-03-27 09:37:08 +1000
commit	3f4207256ef421db03fd583e1954ab5fa6cf9919 (patch)
tree	67c5ba24539f3087731a4846c73170a0a0e90de1
parent	13c072b8e91a5ccb5855ca1ba6fe3ea467dbf94d (diff)
parent	68178644c35fca972ed970dc84933281b4913bff (diff)
Merge tag 'amd-drm-next-7.1-2026-03-25' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-7.1-2026-03-25:

amdgpu:
- DSC fix
- Module parameter parsing fix
- PASID reuse fix
- drm_edid leak fix
- SMU 13.x fixes
- SMU 14.x fix
- Fence fix in amdgpu_amdkfd_submit_ib()
- LVDS fixes
- GPU page fault fix for non-4K pages
- Misc cleanups
- UserQ fixes
- SMU 15.0.8 support
- RAS updates
- Devcoredump fixes
- GFX queue priority fixes
- DPIA fixes
- DCN 4.2 updates
- Add debugfs interface for pcie64 registers
- SMU 15.x fixes
- VCN reset fixes
- Documentation fixes

amdkfd:
- Ordering fix in kfd_ioctl_create_process()

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260325175012.4185721-1-alexander.deucher@amd.com
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 112
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 81
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v12_1.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c 19
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 24
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c 7
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c 263
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c 500
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dsc.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_spl_translate.c 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c 35
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c 108
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/transform.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_detection.c 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/pg/dcn42/dcn42_pg_cntl.c 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c 8
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c 7
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h 6
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h 67
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c 18
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 8
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h 295
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h 427
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h 100
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h 19
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h 55
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c 104
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c 160
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c 2270
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h 313
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c 14
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h 1
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_core.c 22
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_umc.c 2
104 files changed, 4866 insertions, 944 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 892c90b8d063..49e7881750fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -690,6 +690,7 @@ enum amdgpu_uid_type {
AMDGPU_UID_TYPE_XCD,
AMDGPU_UID_TYPE_AID,
AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MID,
AMDGPU_UID_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 40c22438b1d2..4f27c75abedb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -692,9 +692,9 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err_ib_sched;
}
- /* Drop the initial kref_init count (see drm_sched_main as example) */
- dma_fence_put(f);
ret = dma_fence_wait(f, false);
+ /* Drop the returned fence reference after the wait completes */
+ dma_fence_put(f);
err_ib_sched:
amdgpu_job_free(job);
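
[Editor's note: the hunk above moves the final dma_fence_put() after the wait; dropping the reference first can free the fence while dma_fence_wait() is still using it. A minimal sketch of the restored lifetime rule, where submit_and_get_fence() is a hypothetical helper returning a referenced fence:]

	struct dma_fence *f = submit_and_get_fence();	/* hypothetical helper */
	long ret = dma_fence_wait(f, false);		/* may block; the held reference keeps f alive */

	dma_fence_put(f);	/* drop the reference only once nothing touches f anymore */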
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index aa9239b310a3..092fd3309099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -618,6 +618,110 @@ out:
}
/**
+ * amdgpu_debugfs_regs_pcie64_read - Read from a 64-bit PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_regs_pcie64_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x7 || *pos & 0x7)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint64_t value;
+
+ value = RREG64_PCIE_EXT(*pos);
+
+ r = put_user(value, (uint64_t *)buf);
+ if (r)
+ goto out;
+
+ result += 8;
+ buf += 8;
+ *pos += 8;
+ size -= 8;
+ }
+
+ r = result;
+out:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_pcie64_write - Write to a 64-bit PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_regs_pcie64_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x7 || *pos & 0x7)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint64_t value;
+
+ r = get_user(value, (uint64_t *)buf);
+ if (r)
+ goto out;
+
+ WREG64_PCIE_EXT(*pos, value);
+
+ result += 8;
+ buf += 8;
+ *pos += 8;
+ size -= 8;
+ }
+
+ r = result;
+out:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
* amdgpu_debugfs_regs_didt_read - Read from a DIDT register
*
* @f: open file handle
@@ -1525,6 +1629,12 @@ static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
.write = amdgpu_debugfs_regs_pcie_write,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_regs_pcie64_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie64_read,
+ .write = amdgpu_debugfs_regs_pcie64_write,
+ .llseek = default_llseek
+};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_smc_read,
@@ -1587,6 +1697,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gprwave_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_pcie64_fops,
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
@@ -1604,6 +1715,7 @@ static const char * const debugfs_regs_names[] = {
"amdgpu_gprwave",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
+ "amdgpu_regs_pcie64",
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ac5769d9e75c..a7038f039b10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3498,7 +3498,8 @@ fail:
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
- char *input = amdgpu_lockup_timeout;
+ char buf[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
+ char *input = buf;
char *timeout_setting = NULL;
int index = 0;
long timeout;
@@ -3508,9 +3509,17 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
adev->video_timeout = msecs_to_jiffies(2000);
- if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ if (!strnlen(amdgpu_lockup_timeout, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
return 0;
+ /*
+ * strsep() destructively modifies its input by replacing delimiters
+ * with '\0'. Use a stack copy so the global module parameter buffer
+ * remains intact for multi-GPU systems where this function is called
+ * once per device.
+ */
+ strscpy(buf, amdgpu_lockup_timeout, sizeof(buf));
+
while ((timeout_setting = strsep(&input, ",")) &&
strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
ret = kstrtol(timeout_setting, 0, &timeout);
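
[Editor's note: the new comment is the crux of the fix: strsep() consumes its input by writing '\0' over each delimiter. A standalone userspace sketch of the hazard and the copy-first remedy, using an illustrative timeout string:]

#define _DEFAULT_SOURCE			/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char param[] = "10000,50000,2000,2000";	/* stand-in for the module parameter */
	char buf[sizeof(param)];
	char *input = buf, *tok;

	strcpy(buf, param);		/* parse a private copy, as the fix does */
	while ((tok = strsep(&input, ",")) != NULL)
		printf("timeout: %s\n", tok);
	printf("original still intact: %s\n", param);	/* usable for the next device */
	return 0;
}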
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0eb0c62d2f4f..8ec5465c3349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -324,7 +324,7 @@ static int amdgpu_discovery_get_tmr_info(struct amdgpu_device *adev,
ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
if (ret)
return ret;
- adev->discovery.size = (u32)tmr_size;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
adev->discovery.offset = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
}
}
@@ -1394,6 +1394,9 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
struct list_head *el, *tmp;
struct kset *die_kset;
+ if (!ip_top)
+ return;
+
die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
@@ -1418,9 +1421,13 @@ void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p)
struct ip_hw_instance *ip_inst;
int i = 0, j;
+ if (!ip_top)
+ return;
+
die_kset = &ip_top->die_kset;
drm_printf(p, "\nHW IP Discovery\n");
+
spin_lock(&die_kset->list_lock);
list_for_each(el_die, &die_kset->list) {
drm_printf(p, "die %d\n", i++);
@@ -1977,11 +1984,10 @@ static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
- struct amdgpu_gmc_memrange **ranges,
+ struct amdgpu_gmc_memrange *ranges,
int *range_cnt, bool refresh)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct amdgpu_gmc_memrange *mem_ranges;
struct table_info *info;
union nps_info *nps_info;
union nps_info nps_data;
@@ -2019,20 +2025,22 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
- mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count);
- if (!mem_ranges)
- return -ENOMEM;
*nps_type = nps_info->v1.nps_type;
+ if (*range_cnt < nps_info->v1.count) {
+ dev_dbg(adev->dev,
+ "not enough space for nps ranges: %d < %d\n",
+ *range_cnt, nps_info->v1.count);
+ return -ENOSPC;
+ }
*range_cnt = nps_info->v1.count;
for (i = 0; i < *range_cnt; i++) {
- mem_ranges[i].base_address =
+ ranges[i].base_address =
nps_info->v1.instance_info[i].base_address;
- mem_ranges[i].limit_address =
+ ranges[i].limit_address =
nps_info->v1.instance_info[i].limit_address;
- mem_ranges[i].nid_mask = -1;
- mem_ranges[i].flags = 0;
+ ranges[i].nid_mask = -1;
+ ranges[i].flags = 0;
}
- *ranges = mem_ranges;
break;
default:
dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
@@ -2334,6 +2342,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
case IP_VERSION(15, 0, 0):
+ case IP_VERSION(15, 0, 8):
amdgpu_device_ip_block_add(adev, &smu_v15_0_ip_block);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index a7aeb47887a3..0ff1a7923eed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -46,7 +46,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
- struct amdgpu_gmc_memrange **ranges,
+ struct amdgpu_gmc_memrange *ranges,
int *range_cnt, bool refresh);
void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 55e8b9cc2f9f..4c5e38dea4c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -64,10 +64,19 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
container_of(evf_mgr, struct amdgpu_fpriv, evf_mgr);
struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
struct dma_fence *ev_fence;
+ bool cookie;
mutex_lock(&uq_mgr->userq_mutex);
+
+ /*
+ * This is intentionally after taking the userq_mutex since we do
+ * allocate memory while holding this lock, but only after ensuring that
+ * the eviction fence is signaled.
+ */
+ cookie = dma_fence_begin_signalling();
+
ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
- amdgpu_userq_evict(uq_mgr, !evf_mgr->shutdown);
+ amdgpu_userq_evict(uq_mgr);
/*
* Signaling the eviction fence must be done while holding the
@@ -75,7 +84,12 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
* next fence.
*/
dma_fence_signal(ev_fence);
+ dma_fence_end_signalling(cookie);
dma_fence_put(ev_fence);
+
+ if (!evf_mgr->shutdown)
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+
mutex_unlock(&uq_mgr->userq_mutex);
}
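
[Editor's note: dma_fence_begin_signalling()/dma_fence_end_signalling() are lockdep annotations: the region between them is treated as a fence-signalling critical section, so anything inside that could wait on memory reclaim is reported. A minimal sketch of the pattern; the function name is illustrative:]

#include <linux/dma-fence.h>

/* Sketch only: annotate the section that must not block on reclaim. */
static void demo_signal(struct dma_fence *fence)
{
	bool cookie = dma_fence_begin_signalling();

	/* No sleeping allocations in here; lockdep would flag them. */
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);
}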
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d209591e3710..8048a4c04b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -438,7 +438,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* @ring: ring to init the fence driver on
*
* Init the fence driver for the requested ring (all asics).
- * Helper function for amdgpu_fence_driver_init().
+ * Helper function for amdgpu_fence_driver_sw_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 01dc73309d73..5376035d32fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -30,6 +30,7 @@
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/dma-fence-unwrap.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -106,6 +107,7 @@ amdgpu_gem_update_timeline_node(struct drm_file *filp,
*chain = dma_fence_chain_alloc();
if (!*chain) {
drm_syncobj_put(*syncobj);
+ *syncobj = NULL;
return -ENOMEM;
}
@@ -741,11 +743,10 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct dma_fence *fence;
int r = 0;
- /* Always start from the VM's existing last update fence. */
- fence = dma_fence_get(vm->last_update);
-
+ /* If the VM is not ready return only a stub. */
if (!amdgpu_vm_ready(vm))
- return fence;
+ return dma_fence_get_stub();
+
/*
* First clean up any freed mappings in the VM.
@@ -754,7 +755,7 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
* schedules GPU work. If nothing needs clearing, @fence can remain as
* the original vm->last_update.
*/
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ r = amdgpu_vm_clear_freed(adev, vm, &vm->last_update);
if (r)
goto error;
@@ -771,47 +772,34 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error;
- /*
- * Decide which fence best represents the last update:
- *
- * MAP/REPLACE:
- * - For always-valid mappings, use vm->last_update.
- * - Otherwise, export bo_va->last_pt_update.
- *
- * UNMAP/CLEAR:
- * Keep the fence returned by amdgpu_vm_clear_freed(). If no work was
- * needed, it can remain as vm->last_pt_update.
- *
- * The VM and BO update fences are always initialized to a valid value.
- * vm->last_update and bo_va->last_pt_update always start as valid fences.
- * and are never expected to be NULL.
- */
- switch (operation) {
- case AMDGPU_VA_OP_MAP:
- case AMDGPU_VA_OP_REPLACE:
+ if ((operation == AMDGPU_VA_OP_MAP ||
+ operation == AMDGPU_VA_OP_REPLACE) &&
+ !amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo)) {
+
/*
- * For MAP/REPLACE, return the page table update fence for the
- * mapping we just modified. bo_va is expected to be valid here.
+ * For MAP/REPLACE of non per-VM BOs we need to sync to both
+ * bo_va->last_pt_update and vm->last_update, otherwise we
+ * potentially miss the PDE updates.
*/
- dma_fence_put(fence);
-
- if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
- fence = dma_fence_get(vm->last_update);
- else
- fence = dma_fence_get(bo_va->last_pt_update);
- break;
- case AMDGPU_VA_OP_UNMAP:
- case AMDGPU_VA_OP_CLEAR:
- default:
- /* keep @fence as returned by amdgpu_vm_clear_freed() */
- break;
+ fence = dma_fence_unwrap_merge(vm->last_update,
+ bo_va->last_pt_update);
+ if (!fence) {
+ /* As fallback in OOM situations */
+ dma_fence_wait(vm->last_update, false);
+ dma_fence_wait(bo_va->last_pt_update, false);
+ fence = dma_fence_get_stub();
+ }
+ } else {
+ fence = dma_fence_get(vm->last_update);
}
+ return fence;
+
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
- return fence;
+ return dma_fence_get(vm->last_update);
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -832,7 +820,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_va *bo_va;
struct drm_syncobj *timeline_syncobj = NULL;
struct dma_fence_chain *timeline_chain = NULL;
- struct dma_fence *fence;
struct drm_exec exec;
uint64_t vm_size;
int r = 0;
@@ -884,6 +871,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->flags & AMDGPU_VM_DELAY_UPDATE &&
+ args->vm_timeline_syncobj_out)
+ return -EINVAL;
+
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@@ -973,11 +964,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
* that represents the last relevant update for this mapping. This
* fence can then be exported to the user-visible VM timeline.
*/
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+ (!adev->debug_vm || timeline_syncobj)) {
+ struct dma_fence *fence;
+
fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
-
- if (timeline_syncobj && fence) {
+ if (timeline_syncobj) {
if (!args->vm_timeline_point) {
/* Replace the existing fence when no point is given. */
drm_syncobj_replace_fence(timeline_syncobj,
@@ -988,6 +981,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
timeline_chain,
fence,
args->vm_timeline_point);
+ timeline_chain = NULL;
}
}
dma_fence_put(fence);
@@ -995,6 +989,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
error:
+ dma_fence_chain_free(timeline_chain);
+ if (timeline_syncobj)
+ drm_syncobj_put(timeline_syncobj);
drm_exec_fini(&exec);
error_put_gobj:
drm_gem_object_put(gobj);
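
[Editor's note: dma_fence_unwrap_merge() returns one fence containing every input fence, or NULL if the merge allocation fails; the hunk above then degrades to blocking waits plus a stub fence. A hedged sketch of the same pattern in isolation (demo_merge is our name):]

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Sketch only: merge two update fences, falling back to synchronous
 * waits and a signaled stub fence when the merge allocation fails. */
static struct dma_fence *demo_merge(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence *merged = dma_fence_unwrap_merge(a, b);

	if (!merged) {
		dma_fence_wait(a, false);
		dma_fence_wait(b, false);
		merged = dma_fence_get_stub();
	}
	return merged;
}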
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a0940db1cd36..860a4405f7dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1374,18 +1374,18 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
uint8_t *exp_ranges)
{
- struct amdgpu_gmc_memrange *ranges;
+ struct amdgpu_gmc_memrange ranges[AMDGPU_MAX_MEM_RANGES];
int range_cnt, ret, i, j;
uint32_t nps_type;
bool refresh;
if (!mem_ranges || !exp_ranges)
return -EINVAL;
-
+ range_cnt = AMDGPU_MAX_MEM_RANGES;
refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
(adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
- ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
- &range_cnt, refresh);
+ ret = amdgpu_discovery_get_nps_info(adev, &nps_type, ranges, &range_cnt,
+ refresh);
if (ret)
return ret;
@@ -1446,8 +1446,6 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
if (!*exp_ranges)
*exp_ranges = range_cnt;
err:
- kvfree(ranges);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 64c519cd7395..d88523568b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -35,10 +35,13 @@
* PASIDs are global address space identifiers that can be shared
* between the GPU, an IOMMU and the driver. VMs on different devices
* may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using a global IDA. VMs are
- * looked up from the PASID per amdgpu_device.
+ * space. Therefore PASIDs are allocated using IDR cyclic allocator
+ * (similar to kernel PID allocation) which naturally delays reuse.
+ * VMs are looked up from the PASID per amdgpu_device.
*/
-static DEFINE_IDA(amdgpu_pasid_ida);
+
+static DEFINE_IDR(amdgpu_pasid_idr);
+static DEFINE_SPINLOCK(amdgpu_pasid_idr_lock);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
@@ -50,8 +53,8 @@ struct amdgpu_pasid_cb {
* amdgpu_pasid_alloc - Allocate a PASID
* @bits: Maximum width of the PASID in bits, must be at least 1
*
- * Allocates a PASID of the given width while keeping smaller PASIDs
- * available if possible.
+ * Uses kernel's IDR cyclic allocator (same as PID allocation).
+ * Allocates sequentially with automatic wrap-around.
*
* Returns a positive integer on success. Returns %-EINVAL if bits==0.
* Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
@@ -59,14 +62,15 @@ struct amdgpu_pasid_cb {
*/
int amdgpu_pasid_alloc(unsigned int bits)
{
- int pasid = -EINVAL;
+ int pasid;
- for (bits = min(bits, 31U); bits > 0; bits--) {
- pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
- (1U << bits) - 1, GFP_KERNEL);
- if (pasid != -ENOSPC)
- break;
- }
+ if (bits == 0)
+ return -EINVAL;
+
+ spin_lock(&amdgpu_pasid_idr_lock);
+ pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1,
+ 1U << bits, GFP_KERNEL);
+ spin_unlock(&amdgpu_pasid_idr_lock);
if (pasid >= 0)
trace_amdgpu_pasid_allocated(pasid);
@@ -81,7 +85,10 @@ int amdgpu_pasid_alloc(unsigned int bits)
void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
- ida_free(&amdgpu_pasid_ida, pasid);
+
+ spin_lock(&amdgpu_pasid_idr_lock);
+ idr_remove(&amdgpu_pasid_idr, pasid);
+ spin_unlock(&amdgpu_pasid_idr_lock);
}
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
@@ -616,3 +623,15 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
}
}
}
+
+/**
+ * amdgpu_pasid_mgr_cleanup - cleanup PASID manager
+ *
+ * Cleanup the IDR allocator.
+ */
+void amdgpu_pasid_mgr_cleanup(void)
+{
+ spin_lock(&amdgpu_pasid_idr_lock);
+ idr_destroy(&amdgpu_pasid_idr);
+ spin_unlock(&amdgpu_pasid_idr_lock);
+}
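
[Editor's note: the reuse delay comes from the cyclic cursor: idr_alloc_cyclic() keeps searching upward from the last allocated id and wraps only at the end of the range. A minimal module sketch (all names ours) showing that a freed id is not handed out again immediately:]

#include <linux/idr.h>
#include <linux/module.h>

static DEFINE_IDR(demo_idr);

static int __init demo_init(void)
{
	/* NULL payload, as in the patch: only the id itself matters. */
	int a = idr_alloc_cyclic(&demo_idr, NULL, 1, 1 << 16, GFP_KERNEL);
	int b = idr_alloc_cyclic(&demo_idr, NULL, 1, 1 << 16, GFP_KERNEL);
	int c;

	idr_remove(&demo_idr, a);	/* free the first id... */
	c = idr_alloc_cyclic(&demo_idr, NULL, 1, 1 << 16, GFP_KERNEL);
	/* ...yet the cursor moves on: typically a=1, b=2, c=3, not c=1 */
	pr_info("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}

static void __exit demo_exit(void)
{
	idr_destroy(&demo_idr);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");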
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index b3649cd3af56..a57919478d3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -74,6 +74,7 @@ int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(u32 pasid);
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
u32 pasid);
+void amdgpu_pasid_mgr_cleanup(void);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 341beec59537..0eecfaa3a94c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -103,10 +103,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
- WARN(1, "Invalid context priority %d\n", args->in.priority);
+ if (!amdgpu_ctx_priority_is_valid(args->in.priority))
return -EINVAL;
- }
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index d94f4966fea9..7f64b783954a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -999,15 +999,11 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
/* Resume all the queues for this process */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
- queue = amdgpu_userq_get(uq_mgr, queue_id);
- if (!queue)
- continue;
if (!amdgpu_userq_buffer_vas_mapped(queue)) {
drm_file_err(uq_mgr->file,
"trying restore queue without va mapping\n");
queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
- amdgpu_userq_put(queue);
continue;
}
@@ -1015,7 +1011,6 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
if (r)
ret = r;
- amdgpu_userq_put(queue);
}
if (ret)
@@ -1232,10 +1227,8 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
}
ret = amdgpu_userq_restore_all(uq_mgr);
- if (ret) {
+ if (ret)
drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
- goto unlock;
- }
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
@@ -1252,13 +1245,9 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Try to unmap all the queues in this process ctx */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
- queue = amdgpu_userq_get(uq_mgr, queue_id);
- if (!queue)
- continue;
r = amdgpu_userq_preempt_helper(queue);
if (r)
ret = r;
- amdgpu_userq_put(queue);
}
if (ret)
@@ -1291,31 +1280,25 @@ amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
int ret;
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
- queue = amdgpu_userq_get(uq_mgr, queue_id);
- if (!queue)
- continue;
-
struct dma_fence *f = queue->last_fence;
- if (!f || dma_fence_is_signaled(f)) {
- amdgpu_userq_put(queue);
+ if (!f || dma_fence_is_signaled(f))
continue;
- }
+
ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
- amdgpu_userq_put(queue);
+
return -ETIMEDOUT;
}
- amdgpu_userq_put(queue);
}
return 0;
}
void
-amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_device *adev = uq_mgr->adev;
int ret;
@@ -1329,8 +1312,6 @@ amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
if (ret)
dev_err(adev->dev, "Failed to evict userqueue\n");
- if (schedule_resume)
- schedule_delayed_work(&uq_mgr->resume_work, 0);
}
int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index f0abc16d02cc..a4d44abf24fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -133,8 +133,7 @@ int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_userq_obj *userq_obj);
-void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
- bool schedule_resume);
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr);
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 781896c9fd26..fe6d83e859a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -705,7 +705,7 @@ amdgpu_userq_wait_count_fences(struct drm_file *filp,
num_fences++;
}
- wait_info->num_fences = num_fences;
+ wait_info->num_fences = min(num_fences, USHRT_MAX);
r = 0;
error_unlock:
@@ -715,6 +715,19 @@ error_unlock:
}
static int
+amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
+ struct dma_fence **fences, unsigned int *num_fences,
+ struct dma_fence *fence)
+{
+ /* Fall back to a blocking wait if userspace didn't allocate enough space */
+ if (*num_fences >= wait_info->num_fences)
+ return dma_fence_wait(fence, true);
+
+ fences[(*num_fences)++] = dma_fence_get(fence);
+ return 0;
+}
+
+static int
amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
struct drm_amdgpu_userq_wait *wait_info,
u32 *syncobj_handles, u32 *timeline_points,
@@ -757,13 +770,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
goto free_fences;
dma_fence_unwrap_for_each(f, &iter, fence) {
- if (num_fences >= wait_info->num_fences) {
- r = -EINVAL;
+ r = amdgpu_userq_wait_add_fence(wait_info, fences,
+ &num_fences, f);
+ if (r) {
dma_fence_put(fence);
goto free_fences;
}
-
- fences[num_fences++] = dma_fence_get(f);
}
dma_fence_put(fence);
@@ -780,14 +792,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
if (r)
goto free_fences;
- if (num_fences >= wait_info->num_fences) {
- dma_fence_put(fence);
- r = -EINVAL;
+ r = amdgpu_userq_wait_add_fence(wait_info, fences,
+ &num_fences, fence);
+ dma_fence_put(fence);
+ if (r)
goto free_fences;
- }
- /* Give the reference to the fence array */
- fences[num_fences++] = fence;
}
/* Lock all the GEM objects */
@@ -817,12 +827,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
DMA_RESV_USAGE_READ, fence) {
- if (num_fences >= wait_info->num_fences) {
- r = -EINVAL;
+ r = amdgpu_userq_wait_add_fence(wait_info, fences,
+ &num_fences, fence);
+ if (r)
goto error_unlock;
- }
-
- fences[num_fences++] = dma_fence_get(fence);
}
}
@@ -833,12 +841,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
DMA_RESV_USAGE_WRITE, fence) {
- if (num_fences >= wait_info->num_fences) {
- r = -EINVAL;
+ r = amdgpu_userq_wait_add_fence(wait_info, fences,
+ &num_fences, fence);
+ if (r)
goto error_unlock;
- }
-
- fences[num_fences++] = dma_fence_get(fence);
}
}
@@ -961,13 +967,13 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
}
num_read_bo_handles = wait_info->num_bo_read_handles;
- ptr = u64_to_user_ptr(wait_info->bo_read_handles),
+ ptr = u64_to_user_ptr(wait_info->bo_read_handles);
r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
if (r)
goto free_timeline_points;
num_write_bo_handles = wait_info->num_bo_write_handles;
- ptr = u64_to_user_ptr(wait_info->bo_write_handles),
+ ptr = u64_to_user_ptr(wait_info->bo_write_handles);
r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
&gobj_write);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3f5712fc7216..73abac6be5b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2916,6 +2916,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
xa_destroy(&adev->vm_manager.pasids);
amdgpu_vmid_mgr_fini(adev);
+ amdgpu_pasid_mgr_cleanup();
}
/**
@@ -2991,14 +2992,14 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
if (!root)
return false;
- addr /= AMDGPU_GPU_PAGE_SIZE;
-
if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
- node_id, addr, ts, write_fault)) {
+ node_id, addr >> PAGE_SHIFT, ts, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+
r = amdgpu_bo_reserve(root, true);
if (r)
goto error_unref;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1893ceeeb26c..8b60299b73ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6752,7 +6752,7 @@ static void gfx_v10_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority
*/
- if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ if (prop->hqd_queue_priority == AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM)
priority = 1;
tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index b1a1b8a10a08..78d1f3eb522e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4088,7 +4088,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority
*/
- if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ if (prop->hqd_queue_priority == AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM)
priority = 1;
tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
index 5dcc2c32644a..0e9089544769 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
@@ -2227,7 +2227,7 @@ static int mes_v12_1_self_test(struct amdgpu_device *adev, int xcc_id)
struct amdgpu_bo *meta_bo = NULL, *ctx_bo = NULL;
void *meta_ptr = NULL, *ctx_ptr = NULL;
u64 meta_gpu_addr, ctx_gpu_addr;
- int size, i, r, pasid;;
+ int size, i, r, pasid;
pasid = amdgpu_pasid_alloc(16);
if (pasid < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index e78526a4e521..ff3013b97abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,21 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vcn_v4_0_3_is_psp_fw_reset_supported(struct amdgpu_device *adev)
+{
+ uint32_t fw_ver = adev->psp.sos.fw_version;
+ uint32_t pgm = (fw_ver >> 8) & 0xFF;
+
+ /*
+ * FWDEV-159155: PSP SOS FW must be >= 0x0036015f for program 0x01
+ * before enabling VCN per-queue reset.
+ */
+ if (pgm == 1)
+ return fw_ver >= 0x0036015f;
+
+ return true;
+}
+
static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -141,7 +156,9 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ vcn_v4_0_3_is_psp_fw_reset_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
return 0;
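
[Editor's note: the program id lives in byte 1 of the packed SOS version word. A tiny standalone check of the decoding, using a hypothetical version value:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw_ver = 0x0036015f;		/* hypothetical SOS version */
	uint32_t pgm = (fw_ver >> 8) & 0xFF;	/* byte 1 -> 0x01 for this value */

	printf("pgm=0x%02x reset_ok=%d\n", (unsigned)pgm,
	       pgm != 1 || fw_ver >= 0x0036015f);
	return 0;
}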
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 09dabb3b3297..462a32abf720 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -3170,11 +3170,11 @@ static int kfd_ioctl_create_process(struct file *filep, struct kfd_process *p, v
struct kfd_process *process;
int ret;
- /* Each FD owns only one kfd_process */
- if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
+ if (!filep->private_data || !p)
return -EINVAL;
- if (!filep->private_data || !p)
+ /* Each FD owns only one kfd_process */
+ if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
return -EINVAL;
mutex_lock(&kfd_processes_mutex);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6fb8c4f447a9..1e304c9d2030 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3994,8 +3994,9 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
+ drm_edid_free(aconnector->drm_edid);
+ aconnector->drm_edid = NULL;
if (sink->dc_edid.length == 0) {
- aconnector->drm_edid = NULL;
hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -5417,7 +5418,7 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
caps = &dm->backlight_caps[aconnector->bl_idx];
/* Only offer ABM property when non-OLED and user didn't turn off by module parameter */
- if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ if (caps->ext_caps && !caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
drm_object_attach_property(&aconnector->base.base,
dm->adev->mode_info.abm_level_property,
ABM_SYSFS_CONTROL);
@@ -9885,7 +9886,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
}
/* Decrement skip count when SR is enabled and we're doing fast updates. */
- if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ if (acrtc_state->update_type <= UPDATE_TYPE_FAST &&
(psr->psr_feature_enabled || pr->config.replay_supported)) {
if (aconn->sr_skip_count > 0)
aconn->sr_skip_count--;
@@ -9898,7 +9899,8 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* a vblank event disable request to enable PSR/RP. PSR SU/RP
* can be enabled immediately once OS demonstrates an
* adequate number of fast atomic commits to notify KMD
- * of update events. See `vblank_control_worker()`.
+ * of update events.
+ * See `amdgpu_dm_crtc_vblank_control_worker()`.
*/
if (!vrr_active &&
acrtc_attach->dm_irq_params.allow_sr_entry &&
@@ -10066,8 +10068,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/*
* If the dirty regions changed, PSR-SU need to be disabled temporarily
* and enabled it again after dirty regions are stable to avoid video glitch.
- * PSR-SU will be enabled in vblank_control_worker() if user pause the video
- * during the PSR-SU was disabled.
+ * PSR-SU will be enabled in
+ * amdgpu_dm_crtc_vblank_control_worker() if user
+ * pause the video during the PSR-SU was disabled.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
acrtc_attach->dm_irq_params.allow_sr_entry &&
@@ -10093,7 +10096,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* fast updates.
*/
if (crtc->state->async_flip &&
- (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ (acrtc_state->update_type > UPDATE_TYPE_FAST ||
get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n",
@@ -10101,7 +10104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
- acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->update_type <= UPDATE_TYPE_FAST &&
get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
@@ -12529,6 +12532,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_new_crtc_state->mode_changed_independent_from_dsc = new_crtc_state->mode_changed;
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
if (ret) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 83fefd902355..d1a14e0c12bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1005,6 +1005,7 @@ struct dm_crtc_state {
bool freesync_vrr_info_changed;
+ bool mode_changed_independent_from_dsc;
bool dsc_force_changed;
bool vrr_supported;
struct mod_freesync_config freesync_config;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index b96fbc03c371..62573173e2ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -812,7 +812,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
unsigned long flags1;
bool forward_roi_change = false;
bool notify_ta = false;
- bool all_crc_ready = true;
struct dc_stream_state *stream_state;
int i;
@@ -936,9 +935,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
continue;
}
- if (!crtc_ctx->crc_info.crc[i].crc_ready)
- all_crc_ready = false;
-
if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
/* Reset the reference frame count after user update the ROI
* or it reaches the maximum value.
@@ -948,9 +944,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
crtc_ctx->crc_info.crc[i].frame_count += 1;
}
spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
-
- if (all_crc_ready)
- complete_all(&crtc_ctx->crc_info.completion);
}
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 95bdb8699d7f..8538513ea879 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -70,7 +70,6 @@ struct crc_data {
struct crc_info {
struct crc_data crc[MAX_CRC_WINDOW_NUM];
- struct completion completion;
spinlock_t lock;
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 304437c2284d..82727f6ec469 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -685,7 +685,7 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
- dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+ dm_crtc_state->update_type > UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7be50e8c0636..5d8c4c7020b1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1744,9 +1744,11 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ind = find_crtc_index_in_state_by_stream(state, stream);
if (ind >= 0) {
+ struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(state->crtcs[ind].new_state);
+
DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
__func__, __LINE__, stream);
- state->crtcs[ind].new_state->mode_changed = 0;
+ dm_new_crtc_state->base.mode_changed = dm_new_crtc_state->mode_changed_independent_from_dsc;
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
index 97c9f0ce13e4..b4c6522e922c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
@@ -291,6 +291,11 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+
+ /* Clamp the deep-sleep DCFCLK requested from PMFW to the DCN minimum. */
+ if (dc->debug.min_deep_sleep_dcfclk_khz > 0 && clk_mgr_base->clks.dcfclk_deep_sleep_khz < dc->debug.min_deep_sleep_dcfclk_khz)
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = dc->debug.min_deep_sleep_dcfclk_khz;
+
dcn42_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
@@ -469,6 +474,9 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
internal.CLK8_CLK1_CURRENT_CNT);
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk\n",
+ internal.CLK8_CLK4_CURRENT_CNT);
+
DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
internal.CLK8_CLK3_BYPASS_CNTL);
@@ -569,7 +577,6 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn42 *clk_mgr = TO_CLK_MGR_DCN42(clk_mgr_int);
- struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
(void)dc_logger;
@@ -587,131 +594,7 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
dcn42_dump_clk_registers(&clk_mgr_base->boot_snapshot, clk_mgr);
clk_mgr_base->clks.ref_dtbclk_khz = clk_mgr_base->boot_snapshot.dtbclk * 10;
- if (clk_mgr_base->boot_snapshot.dtbclk > 59000) {
- /*dtbclk enabled based on*/
- clk_mgr_base->clks.dtbclk_en = true;
- }
-
- if (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels != 0) {
- /*skip to get clock table and notify pmfw watermark range again*/
- DC_LOG_SMU("skip to get dpm_clks from pmfw from resume and acr\n");
- return;
- }
-
- smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
- clk_mgr_base->ctx,
- DC_MEM_ALLOC_TYPE_GART,
- sizeof(DpmClocks_t_dcn42),
- &smu_dpm_clks.mc_address.quad_part);
-
- ASSERT(smu_dpm_clks.dpm_clks);
- if (clk_mgr_base->ctx->dc->debug.pstate_enabled && clk_mgr_int->smu_present && smu_dpm_clks.mc_address.quad_part != 0) {
- int i;
- DpmClocks_t_dcn42 *dpm_clks = smu_dpm_clks.dpm_clks;
-
- dcn42_get_dpm_table_from_smu(clk_mgr_int, &smu_dpm_clks);
- DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
- "NumDispClkLevelsEnabled: %d\n"
- "NumSocClkLevelsEnabled: %d\n"
- "VcnClkLevelsEnabled: %d\n"
- "FClkLevelsEnabled: %d\n"
- "NumMemPstatesEnabled: %d\n"
- "MinGfxClk: %d\n"
- "MaxGfxClk: %d\n",
- dpm_clks->NumDcfClkLevelsEnabled,
- dpm_clks->NumDispClkLevelsEnabled,
- dpm_clks->NumSocClkLevelsEnabled,
- dpm_clks->VcnClkLevelsEnabled,
- dpm_clks->NumFclkLevelsEnabled,
- dpm_clks->NumMemPstatesEnabled,
- dpm_clks->MinGfxClk,
- dpm_clks->MaxGfxClk);
-
- for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
- DC_LOG_SMU("dpm_clks->DcfClocks[%d] = %d\n",
- i,
- dpm_clks->DcfClocks[i]);
- }
- for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) {
- DC_LOG_SMU("dpm_clks->DispClocks[%d] = %d\n",
- i, dpm_clks->DispClocks[i]);
- }
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
- DC_LOG_SMU("dpm_clks->SocClocks[%d] = %d\n",
- i, dpm_clks->SocClocks[i]);
- }
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
- DC_LOG_SMU("dpm_clks->FclkClocks_Freq[%d] = %d\n",
- i, dpm_clks->FclkClocks_Freq[i]);
- DC_LOG_SMU("dpm_clks->FclkClocks_Voltage[%d] = %d\n",
- i, dpm_clks->FclkClocks_Voltage[i]);
- }
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- DC_LOG_SMU("dpm_clks->SocVoltage[%d] = %d\n",
- i, dpm_clks->SocVoltage[i]);
-
- for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
- DC_LOG_SMU("dpm_clks.MemPstateTable[%d].UClk = %d\n"
- "dpm_clks->MemPstateTable[%d].MemClk= %d\n"
- "dpm_clks->MemPstateTable[%d].Voltage = %d\n",
- i, dpm_clks->MemPstateTable[i].UClk,
- i, dpm_clks->MemPstateTable[i].MemClk,
- i, dpm_clks->MemPstateTable[i].Voltage);
- }
-
- if (clk_mgr_base->ctx->dc_bios->integrated_info && clk_mgr_base->ctx->dc->config.use_default_clock_table == false) {
- /* DCFCLK */
- dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- dpm_clks->DcfClocks,
- dpm_clks->NumDcfClkLevelsEnabled);
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = dpm_clks->NumDcfClkLevelsEnabled;
-
- /* SOCCLK */
- dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- dpm_clks->SocClocks,
- dpm_clks->NumSocClkLevelsEnabled);
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels = dpm_clks->NumSocClkLevelsEnabled;
-
- /* DISPCLK */
- dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- dpm_clks->DispClocks,
- dpm_clks->NumDispClkLevelsEnabled);
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = dpm_clks->NumDispClkLevelsEnabled;
-
- /* DPPCLK */
- dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
- dpm_clks->DppClocks,
- dpm_clks->NumDispClkLevelsEnabled);
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = dpm_clks->NumDispClkLevelsEnabled;
-
- /* FCLK */
- dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
- dpm_clks->FclkClocks_Freq,
- NUM_FCLK_DPM_LEVELS);
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels = dpm_clks->NumFclkLevelsEnabled;
- clk_mgr_base->bw_params->clk_table.num_entries = dpm_clks->NumFclkLevelsEnabled;
-
- /* Memory Pstate table is in reverse order*/
- ASSERT(dpm_clks->NumMemPstatesEnabled <= NUM_MEM_PSTATE_LEVELS);
- if (dpm_clks->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS)
- dpm_clks->NumMemPstatesEnabled = NUM_MEM_PSTATE_LEVELS;
- for (i = 0; i < dpm_clks->NumMemPstatesEnabled; i++) {
- clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].memclk_mhz = dpm_clks->MemPstateTable[i].UClk;
- clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].wck_ratio = dcn42_convert_wck_ratio(dpm_clks->MemPstateTable[i].WckRatio) ;
- }
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels = dpm_clks->NumMemPstatesEnabled;
-
- /* DTBCLK*/
- clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = clk_mgr_base->clks.ref_dtbclk_khz / 1000;
- clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
- /* Refresh bounding box */
- clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
- clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
- }
- }
- if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr_base->ctx, DC_MEM_ALLOC_TYPE_GART,
- smu_dpm_clks.dpm_clks);
+ clk_mgr_base->clks.dtbclk_en = clk_mgr_base->boot_snapshot.dtbclk > 59000;
}
static struct clk_bw_params dcn42_bw_params = {
@@ -1071,6 +954,127 @@ bool dcn42_is_smu_present(struct clk_mgr *clk_mgr_base)
return clk_mgr->smu_present;
}
+static void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int)
+{
+ struct clk_mgr *clk_mgr_base = &clk_mgr_int->base;
+ struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
+
+ DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
+ (void)dc_logger;
+
+ smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr_base->ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(DpmClocks_t_dcn42),
+ &smu_dpm_clks.mc_address.quad_part);
+
+ ASSERT(smu_dpm_clks.dpm_clks);
+ if (clk_mgr_base->ctx->dc->debug.pstate_enabled && smu_dpm_clks.mc_address.quad_part != 0) {
+ int i;
+ DpmClocks_t_dcn42 *dpm_clks = smu_dpm_clks.dpm_clks;
+
+ dcn42_get_dpm_table_from_smu(clk_mgr_int, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "FClkLevelsEnabled: %d\n"
+ "NumMemPstatesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ dpm_clks->NumDcfClkLevelsEnabled,
+ dpm_clks->NumDispClkLevelsEnabled,
+ dpm_clks->NumSocClkLevelsEnabled,
+ dpm_clks->VcnClkLevelsEnabled,
+ dpm_clks->NumFclkLevelsEnabled,
+ dpm_clks->NumMemPstatesEnabled,
+ dpm_clks->MinGfxClk,
+ dpm_clks->MaxGfxClk);
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ DC_LOG_SMU("dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) {
+ DC_LOG_SMU("dpm_clks->DispClocks[%d] = %d\n",
+ i, dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ DC_LOG_SMU("dpm_clks->SocClocks[%d] = %d\n",
+ i, dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ DC_LOG_SMU("dpm_clks->FclkClocks_Freq[%d] = %d\n",
+ i, dpm_clks->FclkClocks_Freq[i]);
+ DC_LOG_SMU("dpm_clks->FclkClocks_Voltage[%d] = %d\n",
+ i, dpm_clks->FclkClocks_Voltage[i]);
+ }
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
+ DC_LOG_SMU("dpm_clks->SocVoltage[%d] = %d\n",
+ i, dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("dpm_clks.MemPstateTable[%d].UClk = %d\n"
+ "dpm_clks->MemPstateTable[%d].MemClk= %d\n"
+ "dpm_clks->MemPstateTable[%d].Voltage = %d\n",
+ i, dpm_clks->MemPstateTable[i].UClk,
+ i, dpm_clks->MemPstateTable[i].MemClk,
+ i, dpm_clks->MemPstateTable[i].Voltage);
+ }
+
+ if (clk_mgr_base->ctx->dc_bios->integrated_info && clk_mgr_base->ctx->dc->config.use_default_clock_table == false) {
+ /* DCFCLK */
+ dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
+ dpm_clks->DcfClocks,
+ dpm_clks->NumDcfClkLevelsEnabled);
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = dpm_clks->NumDcfClkLevelsEnabled;
+
+ /* SOCCLK */
+ dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
+ dpm_clks->SocClocks,
+ dpm_clks->NumSocClkLevelsEnabled);
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels = dpm_clks->NumSocClkLevelsEnabled;
+
+ /* DISPCLK */
+ dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
+ dpm_clks->DispClocks,
+ dpm_clks->NumDispClkLevelsEnabled);
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = dpm_clks->NumDispClkLevelsEnabled;
+
+ /* DPPCLK */
+ dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ dpm_clks->DppClocks,
+ dpm_clks->NumDispClkLevelsEnabled);
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = dpm_clks->NumDispClkLevelsEnabled;
+
+ /* FCLK */
+ dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ dpm_clks->FclkClocks_Freq,
+ NUM_FCLK_DPM_LEVELS);
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels = dpm_clks->NumFclkLevelsEnabled;
+ clk_mgr_base->bw_params->clk_table.num_entries = dpm_clks->NumFclkLevelsEnabled;
+
+		/* Memory Pstate table is in reverse order */
+ ASSERT(dpm_clks->NumMemPstatesEnabled <= NUM_MEM_PSTATE_LEVELS);
+ if (dpm_clks->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS)
+ dpm_clks->NumMemPstatesEnabled = NUM_MEM_PSTATE_LEVELS;
+ for (i = 0; i < dpm_clks->NumMemPstatesEnabled; i++) {
+ clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].memclk_mhz = dpm_clks->MemPstateTable[i].MemClk;
+			clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].wck_ratio = dcn42_convert_wck_ratio(dpm_clks->MemPstateTable[i].WckRatio);
+ }
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels = dpm_clks->NumMemPstatesEnabled;
+
+		/* DTBCLK */
+ clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = 600; /* Fixed on platform */
+ clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
+ }
+ }
+ if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr_base->ctx, DC_MEM_ALLOC_TYPE_GART,
+ smu_dpm_clks.dpm_clks);
+}
+
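
The "reverse order" copy in dcn42_get_smu_clocks() is the subtle part: the SMU reports MemPstateTable in the opposite order from what DC's clk_table expects, so entry i of one lands at entry (NumMemPstatesEnabled - 1 - i) of the other. A minimal standalone model of that indexing (hypothetical struct, not the driver's types):

    #include <assert.h>

    struct pstate { unsigned int memclk_mhz; };

    /* Copy src[0..n-1] into dst in the opposite order, mirroring the
     * "NumMemPstatesEnabled - 1 - i" indexing above. */
    static void copy_reversed(struct pstate *dst, const struct pstate *src,
                              unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++)
                    dst[n - 1 - i] = src[i];
    }

    int main(void)
    {
            struct pstate src[3] = { {400}, {800}, {1200} };
            struct pstate dst[3];

            copy_reversed(dst, src, 3);
            assert(dst[0].memclk_mhz == 1200 && dst[2].memclk_mhz == 400);
            return 0;
    }
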
static struct clk_mgr_funcs dcn42_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1139,6 +1143,11 @@ void dcn42_clk_mgr_construct(
dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;
clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = dcn42_smu_get_dtbclk(&clk_mgr->base);
+
+ clk_mgr->base.base.bw_params = &dcn42_bw_params;
+
+ if (clk_mgr->base.smu_present)
+ dcn42_get_smu_clocks(&clk_mgr->base);
}
/* in case we don't get a value from the BIOS, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
@@ -1153,6 +1162,8 @@ void dcn42_clk_mgr_construct(
dcn42_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn42_bw_params;
+ if (clk_mgr->base.smu_present)
+ dcn42_get_smu_clocks(&clk_mgr->base);
}
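
Both construct paths now perform the same two steps in the same order, which matters because dcn42_get_smu_clocks() writes through clk_mgr_base->bw_params->clk_table. The pair of lines from the hunks above, restated with comments:

    clk_mgr->base.base.bw_params = &dcn42_bw_params;  /* point at the static table first */

    if (clk_mgr->base.smu_present)
            dcn42_get_smu_clocks(&clk_mgr->base);     /* then let SMU data populate it */
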
void dcn42_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8b9c686eefd2..7dac3f35f0e8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2695,7 +2695,7 @@ static bool is_surface_in_context(
static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
return update_type;
@@ -2769,28 +2769,12 @@ static struct surface_update_descriptor get_plane_info_update_type(const struct
if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
- switch (tiling->gfxversion) {
- case DcGfxVersion9:
- case DcGfxVersion10:
- case DcGfxVersion11:
- if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
- break;
- case DcGfxAddr3:
- if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
- break;
- case DcGfxVersion7:
- case DcGfxVersion8:
- case DcGfxVersionUnknown:
- default:
- break;
+ if (tiling->flags.avoid_full_update_on_tiling_change) {
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ } else {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
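
The removed per-gfx-version switch and the new flag-driven branch collapse to a single decision; a hypothetical condensation (assuming the containing union is dc_tiling_info, and that elevate_update_type() only ever raises severity):

    /* Illustration only, not in the patch: on a swizzle/tiling change,
     * asics that set flags.avoid_full_update_on_tiling_change take a MED,
     * stream-locked update; all others take a FULL, globally locked update
     * and flag a bandwidth recalculation, as the old non-linear-swizzle
     * cases did. */
    static enum surface_update_type tiling_change_severity(const union dc_tiling_info *tiling)
    {
            return tiling->flags.avoid_full_update_on_tiling_change ?
                    UPDATE_TYPE_MED : UPDATE_TYPE_FULL;
    }
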
@@ -2803,7 +2787,7 @@ static struct surface_update_descriptor get_scaling_info_update_type(
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
return update_type;
@@ -2854,11 +2838,11 @@ static struct surface_update_descriptor get_scaling_info_update_type(
return update_type;
}
-static struct surface_update_descriptor det_surface_update(
+static struct surface_update_descriptor check_update_surface(
const struct dc_check_config *check_config,
struct dc_surface_update *u)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
if (u->surface->force_full_update) {
@@ -2878,7 +2862,7 @@ static struct surface_update_descriptor det_surface_update(
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_STREAM);
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
@@ -2892,27 +2876,43 @@ static struct surface_update_descriptor det_surface_update(
if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (u->cursor_csc_color_matrix) {
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
@@ -2928,9 +2928,10 @@ static struct surface_update_descriptor det_surface_update(
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
- // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type,
+ check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
+ LOCK_DESCRIPTOR_STREAM);
}
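
Every color-management field above now repeats the same `enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST` elevation; a hypothetical helper (not in the patch) that would condense the pattern:

    /* Hypothetical refactor, illustration only. */
    static void elevate_cm_update(struct surface_update_descriptor *type,
                                  const struct dc_check_config *check_config)
    {
            elevate_update_type(type,
                                check_config->enable_legacy_fast_update ?
                                        UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
                                LOCK_DESCRIPTOR_STREAM);
    }
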
if (u->sdr_white_level_nits)
@@ -2984,7 +2985,7 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
int surface_count,
struct dc_stream_update *stream_update)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
/* When countdown finishes, promote this flip to full to trigger deferred final transition */
if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
@@ -3051,7 +3052,18 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
if (su_flags->raw)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- // Non-global cases
+ /* Non-global cases */
+ if (stream_update->hdr_static_metadata ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
@@ -3061,11 +3073,32 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
su_flags->bits.out_tf = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
+
+ if (stream_update->periodic_interrupt) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->dither_option) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->cursor_position || stream_update->cursor_attributes) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ /* TODO - cleanup post blend CM */
+ if (stream_update->func_shaper || stream_update->lut3d_func) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
+
+ if (stream_update->pending_test_pattern) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
for (int i = 0 ; i < surface_count; i++) {
struct surface_update_descriptor inner_type =
- det_surface_update(check_config, &updates[i]);
+ check_update_surface(check_config, &updates[i]);
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
@@ -3092,6 +3125,81 @@ struct surface_update_descriptor dc_check_update_surfaces_for_stream(
return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
+/*
+ * check_update_state_and_surfaces_for_stream() - Determine update type (addr-only, fast, med, or full)
+ *
+ * This function performs checks on the DC global state, and is therefore not re-entrant. It
+ * should not be called from DM.
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+static struct surface_update_descriptor check_update_state_and_surfaces_for_stream(
+ const struct dc *dc,
+ const struct dc_check_config *check_config,
+ const struct dc_stream_state *stream,
+ const struct dc_surface_update *updates,
+ const int surface_count,
+ const struct dc_stream_update *stream_update)
+{
+ const struct dc_state *context = dc->current_state;
+
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE};
+
+ if (updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, updates[i].surface))
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ if (dc->idle_optimizations_allowed)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ if (dc_can_clear_cursor_limit(dc))
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+
+ return overall_type;
+}
+
+/*
+ * dc_check_update_state_and_surfaces_for_stream() - Determine update type (addr-only, fast, med, or full)
+ *
+ * This function performs checks on the DC global state, stream and surface update, and is
+ * therefore not re-entrant. It should not be called from DM.
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+static struct surface_update_descriptor dc_check_update_state_and_surfaces_for_stream(
+ const struct dc *dc,
+ const struct dc_check_config *check_config,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ /* check updates against the entire DC state (global) first */
+ struct surface_update_descriptor overall_update_type = check_update_state_and_surfaces_for_stream(
+ dc,
+ check_config,
+ stream,
+ updates,
+ surface_count,
+ stream_update);
+
+ /* check updates for stream and plane */
+ struct surface_update_descriptor stream_update_type = dc_check_update_surfaces_for_stream(
+ check_config,
+ updates,
+ surface_count,
+ stream_update);
+ elevate_update_type(&overall_update_type, stream_update_type.update_type, stream_update_type.lock_descriptor);
+
+ return overall_update_type;
+}
+
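
Every call site above treats elevate_update_type() as a ratchet; a minimal model under that assumption (update-type ordering from dc.h; the NONE < STREAM < GLOBAL ordering of lock descriptors is inferred from the call sites, not confirmed here):

    enum surface_update_type { UPDATE_TYPE_ADDR_ONLY, UPDATE_TYPE_FAST,
                               UPDATE_TYPE_MED, UPDATE_TYPE_FULL };
    enum lock_descriptor { LOCK_DESCRIPTOR_NONE, LOCK_DESCRIPTOR_STREAM,
                           LOCK_DESCRIPTOR_GLOBAL };

    struct surface_update_descriptor {
            enum surface_update_type update_type;
            enum lock_descriptor lock_descriptor;
    };

    /* Model: both fields only ever ratchet upward, never down. */
    static void elevate_update_type(struct surface_update_descriptor *desc,
                                    enum surface_update_type type,
                                    enum lock_descriptor lock)
    {
            if (type > desc->update_type)
                    desc->update_type = type;
            if (lock > desc->lock_descriptor)
                    desc->lock_descriptor = lock;
    }
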
static struct dc_stream_status *stream_get_status(
struct dc_state *ctx,
struct dc_stream_state *stream)
@@ -3448,13 +3556,6 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
-static bool full_update_required_weak(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream);
-
struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
@@ -3524,12 +3625,11 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
- enum surface_update_type *new_update_type,
+ struct surface_update_descriptor *update_descriptor,
struct dc_state **new_context)
{
struct dc_state *context;
int i, j;
- enum surface_update_type update_type;
const struct dc_stream_status *stream_status;
struct dc_context *dc_ctx = dc->ctx;
@@ -3543,17 +3643,20 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- update_type = dc_check_update_surfaces_for_stream(
- &dc->check_config, srf_updates, surface_count, stream_update).update_type;
- if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
- update_type = UPDATE_TYPE_FULL;
+ *update_descriptor = dc_check_update_state_and_surfaces_for_stream(
+ dc,
+ &dc->check_config,
+ stream,
+ srf_updates,
+ surface_count,
+ stream_update);
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
*/
force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
- if (update_type == UPDATE_TYPE_FULL)
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
@@ -3579,7 +3682,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
- if (update_type == UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
if (stream_update) {
uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
@@ -3589,13 +3692,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
- if (update_type >= update_surface_trace_level)
+ if (update_descriptor->update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
for (i = 0; i < surface_count; i++)
copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
- if (update_type >= UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
for (i = 0; i < surface_count; i++)
@@ -3633,7 +3736,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_type != UPDATE_TYPE_MED)
+ if (update_descriptor->update_type != UPDATE_TYPE_MED)
continue;
if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -3647,7 +3750,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
- if (update_type == UPDATE_TYPE_FULL) {
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
struct pipe_split_policy_backup policy;
bool minimize = false;
@@ -3676,8 +3779,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
- *new_update_type = update_type;
- if (update_type == UPDATE_TYPE_FULL)
+ if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
@@ -3757,7 +3859,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
program_cursor_position(dc, stream);
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
if (stream_update->dsc_config)
@@ -4066,7 +4168,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
- bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
if (should_lock_all_pipes)
determine_pipe_unlock_order(dc, context);
@@ -4126,7 +4228,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type == UPDATE_TYPE_FAST &&
+ if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4183,7 +4285,7 @@ static void commit_planes_for_stream(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
@@ -4200,7 +4302,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
- if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ if (update_type > UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4262,7 +4364,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_inbox1_lock(dc, stream->link)) {
@@ -4333,7 +4435,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
@@ -4349,7 +4451,7 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type > UPDATE_TYPE_FAST) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -4377,7 +4479,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type == UPDATE_TYPE_FAST &&
+ if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4404,7 +4506,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
stream_status =
@@ -4423,7 +4525,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
- if (update_type == UPDATE_TYPE_FAST)
+ if (update_type <= UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
@@ -4434,7 +4536,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.program_front_end_for_ctx && update_type > UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
//Pipe busy until some frame and line #
@@ -4462,7 +4564,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
// Update Type FAST, Surface updates
- if (update_type == UPDATE_TYPE_FAST) {
+ if (update_type <= UPDATE_TYPE_FAST) {
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -4499,7 +4601,7 @@ static void commit_planes_for_stream(struct dc *dc,
srf_updates[i].cm->flags.bits.lut3d_enable &&
srf_updates[i].cm->flags.bits.lut3d_dma_enable &&
dc->hwss.trigger_3dlut_dma_load)
- dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
+ dc->hwss.trigger_3dlut_dma_load(pipe_ctx);
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
@@ -4519,7 +4621,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
@@ -4552,13 +4654,13 @@ static void commit_planes_for_stream(struct dc *dc,
/* If enabling subvp or transitioning from subvp->subvp, enable the
* phantom streams before we program front end for the phantom pipes.
*/
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type > UPDATE_TYPE_FAST) {
if (dc->hwss.enable_phantom_streams)
dc->hwss.enable_phantom_streams(dc, context);
}
}
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (subvp_prev_use && !subvp_curr_use) {
@@ -4571,7 +4673,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.disable_phantom_streams(dc, context);
}
- if (update_type != UPDATE_TYPE_FAST)
+ if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
@@ -5043,191 +5145,12 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
-void populate_fast_updates(struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_update *stream_update)
-{
- int i = 0;
-
- if (stream_update) {
- fast_update[0].out_transfer_func = stream_update->out_transfer_func;
- fast_update[0].output_csc_transform = stream_update->output_csc_transform;
- } else {
- fast_update[0].out_transfer_func = NULL;
- fast_update[0].output_csc_transform = NULL;
- }
-
- for (i = 0; i < surface_count; i++) {
- fast_update[i].flip_addr = srf_updates[i].flip_addr;
- fast_update[i].gamma = srf_updates[i].gamma;
- fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
- fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
- fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
- fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
- fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
- }
-}
-
-static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
-{
- int i;
-
- if (fast_update[0].out_transfer_func ||
- fast_update[0].output_csc_transform)
- return true;
-
- for (i = 0; i < surface_count; i++) {
- if (fast_update[i].flip_addr ||
- fast_update[i].gamma ||
- fast_update[i].gamut_remap_matrix ||
- fast_update[i].input_csc_color_matrix ||
- fast_update[i].cursor_csc_color_matrix ||
- fast_update[i].cm_hist_control ||
- fast_update[i].coeff_reduction_factor)
- return true;
- }
-
- return false;
-}
-
-bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
-{
- int i;
-
- if (fast_update[0].out_transfer_func ||
- fast_update[0].output_csc_transform)
- return true;
-
- for (i = 0; i < surface_count; i++) {
- if (fast_update[i].input_csc_color_matrix ||
- fast_update[i].gamma ||
- fast_update[i].gamut_remap_matrix ||
- fast_update[i].coeff_reduction_factor ||
- fast_update[i].cm_hist_control ||
- fast_update[i].cursor_csc_color_matrix)
- return true;
- }
-
- return false;
-}
-
-static bool full_update_required_weak(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- const struct dc_state *context = dc->current_state;
- if (srf_updates)
- for (int i = 0; i < surface_count; i++)
- if (!is_surface_in_context(context, srf_updates[i].surface))
- return true;
-
- if (stream) {
- const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
- return false;
-}
-
-static bool full_update_required(
- const struct dc *dc,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- const union dc_plane_cm_flags blend_only_flags = {
- .bits = {
- .blend_enable = 1,
- }
- };
-
- if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
- return true;
-
- for (int i = 0; i < surface_count; i++) {
- if (srf_updates &&
- (srf_updates[i].plane_info ||
- srf_updates[i].scaling_info ||
- (srf_updates[i].hdr_mult.value &&
- srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
- (srf_updates[i].sdr_white_level_nits &&
- srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
- srf_updates[i].in_transfer_func ||
- srf_updates[i].surface->force_full_update ||
- (srf_updates[i].flip_addr &&
- srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
- (srf_updates[i].cm &&
- ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
- (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
- return true;
- }
-
- if (stream_update &&
- (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
- (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
- stream_update->integer_scaling_update) ||
- stream_update->hdr_static_metadata ||
- stream_update->abm_level ||
- stream_update->periodic_interrupt ||
- stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket ||
- stream_update->vsp_infopacket ||
- stream_update->hfvsif_infopacket ||
- stream_update->vtem_infopacket ||
- stream_update->adaptive_sync_infopacket ||
- stream_update->avi_infopacket ||
- stream_update->dpms_off ||
- stream_update->allow_freesync ||
- stream_update->vrr_active_variable ||
- stream_update->vrr_active_fixed ||
- stream_update->gamut_remap ||
- stream_update->output_color_space ||
- stream_update->dither_option ||
- stream_update->wb_update ||
- stream_update->dsc_config ||
- stream_update->mst_bw_update ||
- stream_update->func_shaper ||
- stream_update->lut3d_func ||
- stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update ||
- stream_update->hw_cursor_req))
- return true;
-
- return false;
-}
-
-static bool fast_update_only(
- const struct dc *dc,
- const struct dc_fast_update *fast_update,
- const struct dc_surface_update *srf_updates,
- int surface_count,
- const struct dc_stream_update *stream_update,
- const struct dc_stream_state *stream)
-{
- return fast_updates_exist(fast_update, surface_count)
- && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
- enum surface_update_type update_type;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
@@ -5235,11 +5158,9 @@ static bool update_planes_and_stream_v2(struct dc *dc,
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
- bool is_fast_update_only;
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
- surface_count, stream_update, stream);
+ struct surface_update_descriptor update_descriptor = {0};
+
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
@@ -5258,7 +5179,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
- &update_type,
+ &update_descriptor,
&context))
return false;
@@ -5268,7 +5189,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
dc_state_release(context);
return false;
}
- update_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&update_descriptor, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
@@ -5277,13 +5198,13 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
+ if (update_descriptor.update_type <= UPDATE_TYPE_FAST) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
- update_type,
+ update_descriptor.update_type,
context);
} else {
if (!stream_update &&
@@ -5299,7 +5220,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
- update_type,
+ update_descriptor.update_type,
context);
}
if (dc->current_state != context)
@@ -5313,14 +5234,8 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
struct dc_stream_update *stream_update,
enum surface_update_type update_type)
{
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
ASSERT(update_type < UPDATE_TYPE_FULL);
- populate_fast_updates(fast_update, srf_updates, surface_count,
- stream_update);
- if (fast_update_only(dc, fast_update, srf_updates, surface_count,
- stream_update, stream) &&
- !dc->check_config.enable_legacy_fast_update)
+ if (update_type <= UPDATE_TYPE_FAST)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5411,7 +5326,7 @@ static bool update_planes_and_stream_v3(struct dc *dc,
struct dc_stream_update *stream_update)
{
struct dc_state *new_context;
- enum surface_update_type update_type;
+ struct surface_update_descriptor update_descriptor = {0};
/*
* When this function returns true and new_context is not equal to
@@ -5423,22 +5338,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
* replaced by a newer context. Refer to the use of
* swap_and_free_current_context below.
*/
- if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
- stream, stream_update, &update_type,
+ if (!update_planes_and_stream_state(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_descriptor,
&new_context))
return false;
if (new_context == dc->current_state) {
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_type);
+ stream_update, update_descriptor.update_type);
if (dc->check_config.transition_countdown_to_steady_state)
dc->check_config.transition_countdown_to_steady_state--;
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_type, new_context);
+ stream_update, update_descriptor.update_type, new_context);
}
return true;
@@ -7212,7 +7131,7 @@ struct dc_update_scratch_space {
struct dc_stream_update *stream_update;
bool update_v3;
bool do_clear_update_flags;
- enum surface_update_type update_type;
+ struct surface_update_descriptor update_descriptor;
struct dc_state *new_context;
enum update_v3_flow flow;
struct dc_state *backup_context;
@@ -7295,45 +7214,28 @@ static bool update_planes_and_stream_prepare_v3(
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
- /* HWSS path determination needs to be done prior to updating the surface and stream states. */
- struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
-
- populate_fast_updates(fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update);
-
- const bool is_hwss_fast_path_only =
- fast_update_only(scratch->dc,
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update,
- scratch->stream) &&
- !scratch->dc->check_config.enable_legacy_fast_update;
-
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- &scratch->update_type,
+ &scratch->update_descriptor,
&scratch->new_context
)) {
return false;
}
if (scratch->new_context == scratch->dc->current_state) {
- ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_descriptor.update_type < UPDATE_TYPE_FULL);
- scratch->flow = is_hwss_fast_path_only
+ scratch->flow = scratch->update_descriptor.update_type <= UPDATE_TYPE_FAST
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
}
- ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_descriptor.update_type >= UPDATE_TYPE_FULL);
const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless(
scratch->dc,
@@ -7406,7 +7308,7 @@ static void update_planes_and_stream_execute_v3_commit(
intermediate_update ? scratch->intermediate_count : scratch->surface_count,
scratch->stream,
use_stream_update ? scratch->stream_update : NULL,
- intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type,
+ intermediate_context ? UPDATE_TYPE_FULL : scratch->update_descriptor.update_type,
// `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context`
intermediate_context ? scratch->intermediate_context : scratch->new_context
);
@@ -7424,7 +7326,7 @@ static void update_planes_and_stream_execute_v3(
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- scratch->update_type,
+ scratch->update_descriptor.update_type,
scratch->new_context
);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8271b12c1a66..727bcf08a84f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -749,10 +749,10 @@ struct clock_source *resource_find_used_clk_src_for_sharing(
return NULL;
}
-static enum pixel_format convert_pixel_format_to_dalsurface(
+static enum dc_pixel_format convert_pixel_format_to_dalsurface(
enum surface_pixel_format surface_pixel_format)
{
- enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+ enum dc_pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
switch (surface_pixel_format) {
case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 908f79b02102..daa7ab362239 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,7 +33,6 @@
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
-#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
@@ -259,7 +258,6 @@ void program_cursor_attributes(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
- bool unlock_dmub = false;
if (!stream)
return;
@@ -278,12 +276,6 @@ void program_cursor_attributes(
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
} else {
- if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
- should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
- dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
- unlock_dmub = true;
- }
-
dc->hwss.cursor_lock(dc, pipe_to_program, true);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
@@ -306,9 +298,6 @@ void program_cursor_attributes(
dc->hwss.cursor_lock(dc, pipe_to_program, false);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
-
- if (unlock_dmub)
- dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
}
}
}
@@ -416,7 +405,6 @@ void program_cursor_position(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
- bool unlock_dmub = false;
if (!stream)
return;
@@ -436,16 +424,10 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
- } else {
- if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
- should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
- dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
- unlock_dmub = true;
- }
+ else
dc->hwss.cursor_lock(dc, pipe_to_program, true);
- }
}
dc->hwss.set_cursor_position(pipe_ctx);
@@ -457,14 +439,10 @@ void program_cursor_position(
}
if (pipe_to_program) {
- if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
- } else {
+ else
dc->hwss.cursor_lock(dc, pipe_to_program, false);
-
- if (unlock_dmub)
- dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
- }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0b48feaba131..80e217c5a23d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
-#define DC_VER "3.2.374"
+#define DC_VER "3.2.375"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -467,6 +467,7 @@ struct dc_static_screen_params {
*/
enum surface_update_type {
+ UPDATE_TYPE_ADDR_ONLY, /* only surface address is being updated, no other programming needed */
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
UPDATE_TYPE_FULL, /* may need to shuffle resources */
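
Since UPDATE_TYPE_ADDR_ONLY slots in below UPDATE_TYPE_FAST, an ordinal comparison covers both lightweight types, which is why dc.c above rewrites `== UPDATE_TYPE_FAST` as `<=` and `!= UPDATE_TYPE_FAST` as `>`. A compilable check of the ordering:

    #include <assert.h>

    enum surface_update_type { UPDATE_TYPE_ADDR_ONLY, UPDATE_TYPE_FAST,
                               UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

    int main(void)
    {
            /* both lightweight types take the fast commit path... */
            assert(UPDATE_TYPE_ADDR_ONLY <= UPDATE_TYPE_FAST);
            assert(UPDATE_TYPE_FAST <= UPDATE_TYPE_FAST);
            /* ...while MED and FULL still lock all pipes */
            assert(UPDATE_TYPE_MED > UPDATE_TYPE_FAST);
            assert(UPDATE_TYPE_FULL > UPDATE_TYPE_FAST);
            return 0;
    }
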
@@ -562,6 +563,7 @@ struct dc_config {
bool frame_update_cmd_version2;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
+ bool no_native422_support;
};
enum visual_confirm {
@@ -986,7 +988,6 @@ struct link_service;
* causing an issue or not.
*/
struct dc_debug_options {
- bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
int visual_confirm_rect_height;
@@ -1215,6 +1216,7 @@ struct dc_debug_options {
bool enable_dmu_recovery;
unsigned int force_vmin_threshold;
bool enable_otg_frame_sync_pwa;
+ unsigned int min_deep_sleep_dcfclk_khz;
};
@@ -1879,18 +1881,6 @@ struct dc_scaling_info {
struct scaling_taps scaling_quality;
};
-struct dc_fast_update {
- const struct dc_flip_addrs *flip_addr;
- const struct dc_gamma *gamma;
- const struct colorspace_transform *gamut_remap_matrix;
- const struct dc_csc_transform *input_csc_color_matrix;
- const struct fixed31_32 *coeff_reduction_factor;
- struct dc_transfer_func *out_transfer_func;
- struct dc_csc_transform *output_csc_transform;
- const struct dc_csc_transform *cursor_csc_color_matrix;
- struct cm_hist_control *cm_hist_control;
-};
-
struct dc_surface_update {
struct dc_plane_state *surface;
@@ -2029,12 +2019,7 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
-void populate_fast_updates(struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_update *stream_update);
-/*
+ /*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9d18f1c08079..101bce6b8de6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -52,6 +52,7 @@ struct dc_dsc_policy {
uint32_t max_target_bpp;
uint32_t min_target_bpp;
bool enable_dsc_when_not_needed;
+ bool ycbcr422_simple;
};
struct dc_dsc_config_options {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9bf853edc46f..c2ca08d26e37 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -218,7 +218,7 @@ enum surface_pixel_format {
/* Pixel format */
-enum pixel_format {
+enum dc_pixel_format {
/*graph*/
PIXEL_FORMAT_UNINITIALIZED,
PIXEL_FORMAT_INDEX8,
@@ -445,6 +445,10 @@ enum dc_gfxversion {
enum swizzle_mode_addr3_values swizzle;
} gfx_addr3;/*gfx with addr3 and above*/
};
+
+ struct {
+ bool avoid_full_update_on_tiling_change;
+ } flags;
};
/* Rotation angle */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index ba7bf23f2b2f..52ed8deebf63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -63,7 +63,8 @@ static void populate_inits_from_splinits(struct scl_inits *inits,
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
-static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format, const enum pixel_format pixel_format)
+static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format,
+ const enum dc_pixel_format pixel_format)
{
if (pixel_format < PIXEL_FORMAT_INVALID)
*spl_pixel_format = (enum spl_pixel_format)pixel_format;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e224077c8902..fd8ec1660312 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1144,10 +1144,12 @@ union replay_low_refresh_rate_enable_options {
union replay_optimization {
struct {
- //BIT[0-3]: Replay Teams Optimization
+ //BIT[0-1]: Replay Teams Optimization
unsigned int TEAMS_OPTIMIZATION_VER_1 :1;
unsigned int TEAMS_OPTIMIZATION_VER_2 :1;
- unsigned int RESERVED_2_3 :2;
+ //BIT[2]: Replay Live Capture with CVT
+ unsigned int LIVE_CAPTURE_WITH_CVT :1;
+ unsigned int RESERVED_3 :1;
} bits;
unsigned int raw;
@@ -1196,6 +1198,8 @@ struct replay_config {
bool frame_skip_supported;
/* Replay Received Frame Skipping Error HPD. */
bool received_frame_skipping_error_hpd;
+ /* Live capture with CVT is activated */
+ bool live_capture_with_cvt_activated;
};
/* Replay feature flags*/
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
index d9831b0f8235..2076565b1caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
@@ -122,6 +122,7 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GLOBAL_FGCG_REP_CNTL, DCCG_GLOBAL_FGCG_REP_DIS, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 0, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 1, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 2, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 2ba3d3a3aac5..a368802ba51d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -122,6 +122,33 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.program_hpd_filter = dce110_program_hpd_filter,
};
+static const struct link_encoder_funcs dce110_lnk_enc_funcs_no_hpd = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce110_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
+ .enable_lvds_output = dce110_link_encoder_enable_lvds_output,
+ .enable_analog_output = dce110_link_encoder_enable_analog_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .is_dig_enabled = dce110_is_dig_enabled,
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend,
+ .get_hpd_state = dce110_get_hpd_state,
+ .program_hpd_filter = dce110_program_hpd_filter,
+};
+
static enum bp_result link_transmitter_control(
struct dce110_link_encoder *enc110,
struct bp_transmitter_control *cntl)
@@ -865,7 +892,10 @@ void dce110_link_encoder_construct(
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
- enc110->base.funcs = &dce110_lnk_enc_funcs;
+ if (hpd_regs)
+ enc110->base.funcs = &dce110_lnk_enc_funcs;
+ else
+ enc110->base.funcs = &dce110_lnk_enc_funcs_no_hpd;
enc110->base.ctx = init_data->ctx;
enc110->base.id = init_data->encoder;
enc110->base.analog_id = init_data->analog_encoder;
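
The hpd_regs test above selects between two const vtables once at construct time rather than branching in each callback; the pattern in miniature (all names invented for illustration):

    #include <stdio.h>

    struct enc_funcs {
            void (*hw_init)(void);
    };

    static void init_with_hpd(void)    { puts("init: hpd regs wired"); }
    static void init_without_hpd(void) { puts("init: no hpd regs");    }

    static const struct enc_funcs funcs_hpd    = { .hw_init = init_with_hpd };
    static const struct enc_funcs funcs_no_hpd = { .hw_init = init_without_hpd };

    int main(void)
    {
            const void *hpd_regs = NULL; /* pretend the board has no HPD block */
            const struct enc_funcs *funcs = hpd_regs ? &funcs_hpd : &funcs_no_hpd;

            funcs->hw_init();
            return 0;
    }
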
@@ -1855,6 +1885,33 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
.program_hpd_filter = dce110_program_hpd_filter,
};
+static const struct link_encoder_funcs dce60_lnk_enc_funcs_no_hpd = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce60_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output,
+ .enable_lvds_output = dce110_link_encoder_enable_lvds_output,
+ .enable_analog_output = dce110_link_encoder_enable_analog_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .is_dig_enabled = dce110_is_dig_enabled,
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend,
+ .get_hpd_state = dce110_get_hpd_state,
+ .program_hpd_filter = dce110_program_hpd_filter,
+};
+
void dce60_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -1867,7 +1924,10 @@ void dce60_link_encoder_construct(
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
- enc110->base.funcs = &dce60_lnk_enc_funcs;
+ if (hpd_regs)
+ enc110->base.funcs = &dce60_lnk_enc_funcs;
+ else
+ enc110->base.funcs = &dce60_lnk_enc_funcs_no_hpd;
enc110->base.ctx = init_data->ctx;
enc110->base.id = init_data->encoder;
enc110->base.analog_id = init_data->analog_encoder;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b30d16474ceb..f6402e199354 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -202,7 +202,7 @@ static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multipl
return (num - remainder);
}
-static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info)
+static unsigned int dml_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info)
{
unsigned int num_active_pipes = 0;
@@ -546,9 +546,9 @@ static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
return is_vert;
}
-static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
+static unsigned int dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
{
- int unsigned version = 0;
+ unsigned int version = 0;
if (sw_mode == dml2_sw_linear ||
sw_mode == dml2_sw_256b_2d ||
@@ -1761,7 +1761,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
// VBA_DELTA, VBA doesn't have programming value for pte row height linear.
- *p->dpte_row_height_linear = (unsigned int)1 << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
+ *p->dpte_row_height_linear = 1U << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
if (*p->dpte_row_height_linear > 128)
*p->dpte_row_height_linear = 128;
@@ -3377,7 +3377,7 @@ static void calculate_cursor_req_attributes(
DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : 1U << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
@@ -12205,15 +12205,15 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
{
double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
- wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
- wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
- wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
- wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
- wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
- wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
- wm_regs->usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
+ wm_regs->fclk_pstate = (unsigned int)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter = (unsigned int)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit = (unsigned int)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (unsigned int)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (unsigned int)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->temp_read_or_ppt = (unsigned int)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
+ wm_regs->uclk_pstate = (unsigned int)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
+ wm_regs->urgent = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+ wm_regs->usr = (unsigned int)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
wm_regs->refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.UrgentLatency * refclk_freq_in_mhz);
wm_regs->refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.MetaTripToMemory * refclk_freq_in_mhz);
wm_regs->frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
@@ -12692,7 +12692,7 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < 8U);
DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
@@ -13248,7 +13248,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.cstate_max_cap_mode = dml_get_cstate_max_cap_mode(mode_lib);
- out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib);
+ out->min_clocks.dcn4x.dpprefclk_khz = (unsigned int)dml_get_global_dppclk_khz(mode_lib);
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 74812a7d5e28..fd3c61509f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
@@ -143,7 +143,7 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
{
int i;
unsigned int num_found = 0;
- unsigned int plane_id_assigned_to_pipe = -1;
+ unsigned int plane_id_assigned_to_pipe = UINT_MAX;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index d834cb595afa..e25b88f2d6b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -1174,12 +1174,12 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index)
{
unsigned int plane_id;
- int i = 0;
- int location = -1;
+ unsigned int i = 0;
+ unsigned int location = UINT_MAX;
if (!get_plane_id(context->bw_ctx.dml2, context, plane, stream_id, plane_index, &plane_id)) {
ASSERT(false);
- return -1;
+ return UINT_MAX;
}
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
index 808bca9fb804..0d2c9fcd3362 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
@@ -102,7 +102,7 @@ static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
}
}
-static bool dpp1_dscl_is_video_format(enum pixel_format format)
+static bool dpp1_dscl_is_video_format(enum dc_pixel_format format)
{
if (format >= PIXEL_FORMAT_VIDEO_BEGIN
&& format <= PIXEL_FORMAT_VIDEO_END)
@@ -111,7 +111,7 @@ static bool dpp1_dscl_is_video_format(enum pixel_format format)
return false;
}
-static bool dpp1_dscl_is_420_format(enum pixel_format format)
+static bool dpp1_dscl_is_420_format(enum dc_pixel_format format)
{
if (format == PIXEL_FORMAT_420BPP8 ||
format == PIXEL_FORMAT_420BPP10)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 8b6155f9122f..8b7e55e337d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -94,7 +94,7 @@ static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
}
}
-static bool dpp401_dscl_is_video_format(enum pixel_format format)
+static bool dpp401_dscl_is_video_format(enum dc_pixel_format format)
{
if (format >= PIXEL_FORMAT_VIDEO_BEGIN
&& format <= PIXEL_FORMAT_VIDEO_END)
@@ -103,7 +103,7 @@ static bool dpp401_dscl_is_video_format(enum pixel_format format)
return false;
}
-static bool dpp401_dscl_is_420_format(enum pixel_format format)
+static bool dpp401_dscl_is_420_format(enum dc_pixel_format format)
{
if (format == PIXEL_FORMAT_420BPP8 ||
format == PIXEL_FORMAT_420BPP10)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 5b3584ad5b6b..8dfb6dd14eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -680,9 +680,6 @@ static void get_dsc_enc_caps(
} else {
build_dsc_enc_caps(dsc, dsc_enc_caps);
}
-
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -1100,13 +1097,14 @@ static bool setup_dsc_config(
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR422:
- is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
- sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
- branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
- if (!is_dsc_possible) {
+ if (policy.ycbcr422_simple) {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422;
dsc_cfg->ycbcr422_simple = is_dsc_possible;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
+ } else {
+ is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
+ sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
+ branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
}
break;
case PIXEL_ENCODING_YCBCR420:
@@ -1406,6 +1404,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->min_target_bpp = 8;
/* DP specs limits to 3 x bpc */
policy->max_target_bpp = 3 * bpc;
+ policy->ycbcr422_simple = true;
break;
case PIXEL_ENCODING_YCBCR420:
/* DP specs limits to 6 */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 242f1e6f0d8f..6e1e759462bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -100,7 +100,7 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index e712985f7abd..17acb64a9d80 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -128,7 +128,7 @@ void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int m
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index c1bdbb38c690..bbb8b5b18a4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -78,7 +78,7 @@ static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsign
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
index d6e6fbaa041b..a436fa71d4b4 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
@@ -6,6 +6,7 @@
#include "dcn31/dcn31_hubbub.h"
#include "dcn32/dcn32_hubbub.h"
#include "dcn35/dcn35_hubbub.h"
+#include "dcn401/dcn401_hubbub.h"
#include "dcn42/dcn42_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
@@ -429,15 +430,6 @@ static void hubbub42_allow_self_refresh_control(struct hubbub *hubbub, bool allo
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
-
- if (!allow && hubbub->ctx->dc->debug.disable_stutter) {/*controlled by registry key*/
- REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
- DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE, 0,
- DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE, 1);
- REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
- }
}
static void hubbub42_set_sdp_control(struct hubbub *hubbub, bool dc_control)
{
@@ -494,6 +486,46 @@ static bool hubbub42_program_watermarks(
return wm_pending;
}
+static void hubbub42_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
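+ /* NOTE: the channel-count based formula below is kept for reference only;
+ * the rate limit is currently hardcoded to 96 requests. */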
+ uint32_t request_limit = 96; //MAX(12 * memory_channel_count, 96);
+
+ REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit);
+}
+
+static bool dcn42_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ bool wm_pending = false;
+ uint32_t temp;
+
+ /* request backpressure and outstanding return threshold (unused) */
+ //REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold);
+
+ /* 401 delta: do not update P-State stall threshold (handled by fw) */
+ // REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold);
+
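+ /* Raising the SDPIF rate-limit control applies immediately; lowering is
+ * deferred (wm_pending) until safe_to_lower. */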
+ if (safe_to_lower || arb_regs->allow_sdpif_rate_limit_when_cstate_req > hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
+ hubbub2->allow_sdpif_rate_limit_when_cstate_req = arb_regs->allow_sdpif_rate_limit_when_cstate_req;
+
+ /* only update the required bits */
+ REG_GET(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, &temp);
+ if (hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
+ temp |= (1 << 5);
+ } else {
+ temp &= ~(1 << 5);
+ }
+ REG_UPDATE(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, temp);
+ } else {
+ wm_pending = true;
+ }
+
+ return wm_pending;
+}
+
static const struct hubbub_funcs hubbub42_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@@ -509,13 +541,16 @@ static const struct hubbub_funcs hubbub42_funcs = {
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub35_init_watermarks,
- .program_det_size = dcn32_program_det_size,
- .program_compbuf_size = dcn35_program_compbuf_size,
- .init_crb = dcn35_init_crb,
+ .init_crb = dcn401_init_crb,
+ .dchvm_init = dcn35_dchvm_init,
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
- .dchubbub_init = hubbub35_init,
- .dchvm_init = dcn35_dchvm_init,
+ .set_request_limit = hubbub42_set_request_limit,
+ .program_det_segments = dcn401_program_det_segments,
+ .program_compbuf_segments = dcn401_program_compbuf_segments,
+ .wait_for_det_update = dcn401_wait_for_det_update,
+ .program_arbiter = dcn42_program_arbiter,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub42_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
index 0e33c739f459..d85a4ab957a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
@@ -245,6 +245,39 @@ static void hubp42_program_deadline(
REFCYC_PER_VM_DMDATA, dlg_attr->refcyc_per_vm_dmdata);
}
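+/* Program the per-pipe request attributes: plane1 DET base address, expansion
+ * modes, and the luma/chroma chunk, group, and swath size registers. */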
+void hubp42_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ VM_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_7(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+
void hubp42_setup(
struct hubp *hubp,
struct dml2_dchub_per_pipe_register_set *pipe_regs,
@@ -255,7 +288,7 @@ void hubp42_setup(
* disable the requestors is not needed
*/
hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
- hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
+ hubp42_program_requestor(hubp, &pipe_regs->rq_regs);
hubp42_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
static void hubp42_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
index 486c8907413a..88bb1337ab9d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
@@ -48,6 +48,8 @@
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_LOW, HUBP_3DLUT_ADDRESS_LOW, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, mask_sh)
+struct dml2_display_rq_regs;
+
bool hubp42_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
@@ -64,6 +66,10 @@ void hubp42_program_3dlut_fl_config(struct hubp *hubp,
void hubp42_read_state(struct hubp *hubp);
+void hubp42_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs);
+
void hubp42_setup(
struct hubp *hubp,
struct dml2_dchub_per_pipe_register_set *pipe_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index a0aaa727e9fa..e5d93dd348dd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -757,6 +757,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
+ if (!dc->clk_mgr || !dc->clk_mgr->bw_params || !dc->clk_mgr->funcs)
+ return;
+
clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
@@ -765,9 +768,10 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
clocks->fclk_p_state_change_support = true;
clocks->p_state_change_support = true;
+
if (dc->debug.disable_boot_optimizations) {
clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
- } else {
+ } else if (dc->clk_mgr->funcs->get_dispclk_from_dentist) {
/* Even though DPG_EN = 1 for the connected display, it still requires the
* correct timing so we cannot set DISPCLK to min freq or it could cause
* audio corruption. Read current DISPCLK from DENTIST and request the same
@@ -776,10 +780,10 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
}
- dc->clk_mgr->funcs->update_clocks(
- dc->clk_mgr,
- dc->current_state,
- true);
+ if (dc->clk_mgr->funcs->update_clocks)
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
+ dc->current_state,
+ true);
}
void dcn32_init_hw(struct dc *dc)
@@ -1007,7 +1011,8 @@ void dcn32_init_hw(struct dc *dc)
DMUB_FW_VERSION(7, 0, 35)) {
/* FAMS2 is disabled */
dc->debug.fams2_config.bits.enable = false;
- if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) {
+ if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box &&
+ dc->clk_mgr && dc->clk_mgr->bw_params) {
/* update bounding box if FAMS2 disabled */
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 357899116ecd..a72284c3fa1c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -369,12 +369,14 @@ void dcn401_init_hw(struct dc *dc)
}
}
-void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx)
{
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
+ struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
+ primary_dpp_pipe_ctx->plane_res.hubp : NULL;
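+ /* The 3DLUT DMA (fast) load is only triggered on the plane's primary DPP pipe. */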
- if (hubp->funcs->hubp_enable_3dlut_fl) {
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ if (primary_hubp && primary_hubp->funcs->hubp_enable_3dlut_fl) {
+ primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
}
@@ -382,8 +384,11 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
+ const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
+ primary_dpp_pipe_ctx->plane_res.hubp : NULL;
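+ /* Assumption: a DPP pipe carrying a plane always resolves to a primary pipe,
+ * so primary_hubp is treated as valid below. */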
const struct dc_plane_cm *cm = &plane_state->cm;
int mpcc_id = hubp->inst;
struct mpc *mpc = dc->res_pool->mpc;
@@ -481,25 +486,41 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);
if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, primary_hubp->inst);
/* HUBP */
- if (hubp->funcs->hubp_program_3dlut_fl_config)
- hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
+ if (primary_hubp->inst == hubp->inst) {
+ /* only program if this is the primary dpp pipe for the given plane */
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
- if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
+ if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
- if (hubp->funcs->hubp_enable_3dlut_fl) {
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ } else {
+ /* GPU memory only supports fast load path */
+ BREAK_TO_DEBUGGER();
+ lut_enable = false;
+ result = false;
+ }
} else {
- /* GPU memory only supports fast load path */
- BREAK_TO_DEBUGGER();
- lut_enable = false;
- result = false;
+ /* re-trigger primary HUBP to load 3DLUT */
+ if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
+ primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
+ }
+
+ /* clear FL setup on this pipe's HUBP */
+ memset(&lut3d_dma, 0, sizeof(lut3d_dma));
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);
+
+ if (hubp->funcs->hubp_enable_3dlut_fl)
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
}
} else {
/* Legacy (Host) Load Mode */
@@ -1809,42 +1830,41 @@ void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
* This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
* of whether OTG lock is currently being held or not.
*/
- struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
- struct pipe_ctx *odm_pipe, *mpc_pipe;
- int i, wa_pipe_ct = 0;
-
- for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
- for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
- if (mpc_pipe->plane_state &&
- mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
- mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
- wa_pipes[wa_pipe_ct++] = mpc_pipe;
- }
- }
+ const struct pipe_ctx *otg_master_pipe_ctx = resource_get_otg_master(pipe_ctx);
+ struct timing_generator *tg = otg_master_pipe_ctx ?
+ otg_master_pipe_ctx->stream_res.tg : NULL;
+ const struct pipe_ctx *primary_dpp_pipe_ctx = resource_is_pipe_type(pipe_ctx, DPP_PIPE) ?
+ resource_get_primary_dpp_pipe(pipe_ctx) : pipe_ctx;
+ struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
+ primary_dpp_pipe_ctx->plane_res.hubp : NULL;
+
+ if (!otg_master_pipe_ctx || !tg) {
+ return;
}
- if (wa_pipe_ct > 0) {
- if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
- pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
+ if (primary_dpp_pipe_ctx &&
+ primary_dpp_pipe_ctx->plane_state &&
+ primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_enable &&
+ primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_dma_enable) {
+ if (tg->funcs->set_vupdate_keepout)
+ tg->funcs->set_vupdate_keepout(tg, true);
- for (i = 0; i < wa_pipe_ct; ++i) {
- if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
- wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
+ primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
- pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
- if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
- pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
+ tg->funcs->unlock(tg);
+ if (tg->funcs->wait_update_lock_status)
+ tg->funcs->wait_update_lock_status(tg, false);
- for (i = 0; i < wa_pipe_ct; ++i) {
- if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
- wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
+ primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
- if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
- pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
+ if (tg->funcs->set_vupdate_keepout)
+ tg->funcs->set_vupdate_keepout(tg, false);
} else {
- pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ tg->funcs->unlock(tg);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index f78162ab859b..b9a03ffa2717 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -41,8 +41,7 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
bool dcn401_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream);
-void dcn401_trigger_3dlut_dma_load(struct dc *dc,
- struct pipe_ctx *pipe_ctx);
+void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx);
void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
unsigned int *tmds_div);
enum dc_status dcn401_enable_stream_timing(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
index 8e12dc1297c4..7f9c121c00e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
@@ -69,6 +69,7 @@ void dcn42_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
+ bool dchub_ref_freq_changed;
int current_dchub_ref_freq = 0;
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
@@ -203,7 +204,8 @@ void dcn42_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->link_enc->funcs->is_dig_enabled &&
+ if (link && link->link_enc &&
+ link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
hws->funcs.power_down) {
hws->funcs.power_down(dc);
@@ -260,8 +262,12 @@ void dcn42_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
- if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
- dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->clk_mgr->bw_params->num_channels, dc->config.sdpif_request_limit_words_per_umc);
+ if (dc->res_pool->hubbub->funcs->set_request_limit &&
+ dc->clk_mgr && dc->clk_mgr->bw_params &&
+ dc->config.sdpif_request_limit_words_per_umc > 0)
+ dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub,
+ dc->clk_mgr->bw_params->num_channels,
+ dc->config.sdpif_request_limit_words_per_umc);
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
@@ -269,13 +275,18 @@ void dcn42_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+
+ /* sw and fw FAMS versions must match for support */
dc->debug.fams2_config.bits.enable &=
- dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
- if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
- || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
+ dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver;
+ dchub_ref_freq_changed =
+ res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq;
+
+ if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) &&
+ dc->res_pool->funcs->update_bw_bounding_box &&
+ dc->clk_mgr && dc->clk_mgr->bw_params) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
- if (dc->clk_mgr)
- dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
}
if (dc->res_pool->pg_cntl) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index d1dba7ffcd9b..98abe0d2d30f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -1120,7 +1120,7 @@ struct hw_sequencer_funcs {
void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix, int opp_id);
- void (*trigger_3dlut_dma_load)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*trigger_3dlut_dma_load)(struct pipe_ctx *pipe_ctx);
/* VM Related */
int (*init_sys_ctx)(struct dce_hwseq *hws,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b152f6879495..51581c10fd6b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -256,7 +256,7 @@ struct default_adjustment {
enum dc_color_space out_color_space;
enum dc_color_space in_color_space;
enum dc_color_depth color_depth;
- enum pixel_format surface_pixel_format;
+ enum dc_pixel_format surface_pixel_format;
enum graphics_csc_adjust_type csc_adjust_type;
bool force_hw_default;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 5a1d9b708a9d..30990355985d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -160,7 +160,7 @@ struct scaler_data {
struct scaling_ratios ratios;
struct scl_inits inits;
struct sharpness_adj sharpness;
- enum pixel_format format;
+ enum dc_pixel_format format;
struct line_buffer_params lb_params;
// Below struct holds the scaler values to program hw registers
struct dscl_prog_data dscl_prog_data;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index f992c2d16748..7f1761080aba 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -781,7 +781,6 @@ static void restore_phy_clocks_for_destructive_link_verification(const struct dc
}
static void verify_link_capability_destructive(struct dc_link *link,
- struct dc_sink *sink,
enum dc_detect_reason reason)
{
bool should_prepare_phy_clocks =
@@ -855,11 +854,11 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
return destrictive;
}
-static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
+static void verify_link_capability(struct dc_link *link,
enum dc_detect_reason reason)
{
if (should_verify_link_capability_destructively(link, reason))
- verify_link_capability_destructive(link, sink, reason);
+ verify_link_capability_destructive(link, reason);
else
verify_link_capability_non_destructive(link);
}
@@ -1453,8 +1452,9 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
- if (is_local_sink_detect_success && link->local_sink)
- verify_link_capability(link, link->local_sink, reason);
+ if (is_local_sink_detect_success && link->local_sink) {
+ verify_link_capability(link, reason);
+ }
DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index b4f46408a000..e12c25896364 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -181,7 +181,8 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
/* link can be also enabled by vbios. In this case it is not recorded
* in pipe_ctx. Disable link phy here to make sure it is completely off
*/
- dp_disable_link_phy(link, &link_res, link->connector_signal);
+ if (dc_is_dp_signal(link->connector_signal))
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
}
void link_resume(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/pg/dcn42/dcn42_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/pg/dcn42/dcn42_pg_cntl.c
index 3685080ce9dc..96290538a889 100644
--- a/drivers/gpu/drm/amd/display/dc/pg/dcn42/dcn42_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/pg/dcn42/dcn42_pg_cntl.c
@@ -176,11 +176,12 @@ void pg_cntl42_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
bool block_enabled;
+ bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
+ pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
+ pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
+ pg_cntl->ctx->dc->idle_optimizations_allowed;
- if (pg_cntl->ctx->dc->debug.ignore_pg ||
- pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
- pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
- pg_cntl->ctx->dc->idle_optimizations_allowed)
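+ /* Debug overrides only skip the power-down path; power-up must still run. */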
+ if (skip_pg && !power_on)
return;
block_enabled = pg_cntl42_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 92c123aca0c9..fdcf8db6be50 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -650,9 +650,6 @@ static struct link_encoder *dce100_link_encoder_create(
return &enc110->base;
}
- if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
- return NULL;
-
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
@@ -661,7 +658,8 @@ static struct link_encoder *dce100_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 95852d277c22..ab71f645c90e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -671,7 +671,7 @@ static struct link_encoder *dce110_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
return NULL;
link_regs_id =
@@ -682,7 +682,8 @@ static struct link_encoder *dce110_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 58c6a00397cf..b7051bfd4326 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -632,7 +632,7 @@ static struct link_encoder *dce112_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
return NULL;
link_regs_id =
@@ -643,7 +643,8 @@ static struct link_encoder *dce112_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 71d76b021375..7ee70f7b3aa7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -716,7 +716,7 @@ static struct link_encoder *dce120_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
return NULL;
link_regs_id =
@@ -727,7 +727,8 @@ static struct link_encoder *dce120_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index a57d68427812..6a25dcfcdf17 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -746,18 +746,16 @@ static struct link_encoder *dce60_link_encoder_create(
return &enc110->base;
}
- if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
- return NULL;
-
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
dce60_link_encoder_construct(enc110,
- enc_init_data,
- &link_enc_feature,
- &link_enc_regs[link_regs_id],
- &link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index d66d8ac6d897..89927727a0d9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -752,9 +752,6 @@ static struct link_encoder *dce80_link_encoder_create(
return &enc110->base;
}
- if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
- return NULL;
-
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
@@ -763,7 +760,8 @@ static struct link_encoder *dce80_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
- &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
+ NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 046724c86c7a..6ce6b2b1f288 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1963,6 +1963,8 @@ static bool dcn31_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
+ dc->config.no_native422_support = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 4d5fcd7a0b00..0e0c52128c55 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1925,6 +1925,8 @@ static bool dcn315_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->config.no_native422_support = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 60a0e7c95c73..491860cc8378 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1790,7 +1790,7 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
}
}
-static int dcn401_get_power_profile(const struct dc_state *context)
+int dcn401_get_power_profile(const struct dc_state *context)
{
int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000;
int dpm_level = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 5f3b0319cb5b..47f82b818262 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -32,6 +32,7 @@ void dcn401_get_default_tiling_info(struct dc_tiling_info *tiling_info);
unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
+int dcn401_get_power_profile(const struct dc_state *context);
/* Following are definitions for run time init of reg offsets */
/* HUBP */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
index aef187bcf5c3..6328b3dc35f9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
@@ -760,6 +760,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.disable_stutter_for_wm_program = true,
+ .min_deep_sleep_dcfclk_khz = 8000,
};
static const struct dc_check_config config_defaults = {
@@ -1790,8 +1791,10 @@ static struct resource_funcs dcn42_res_pool_funcs = {
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.prepare_mcache_programming = dcn42_prepare_mcache_programming,
.build_pipe_pix_clk_params = dcn42_build_pipe_pix_clk_params,
+ .get_power_profile = dcn401_get_power_profile,
.get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
.get_max_hw_cursor_size = dcn42_get_max_hw_cursor_size,
+ .get_default_tiling_info = dcn10_get_default_tiling_info,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2015,7 +2018,7 @@ static bool dcn42_resource_construct(
dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
dc->config.use_pipe_ctx_sync_logic = true;
- dc->config.dc_mode_clk_limit_support = true;
+ dc->config.dc_mode_clk_limit_support = false;
dc->config.enable_windowed_mpo_odm = true;
/* Use psp mailbox to enable assr */
dc->config.use_assr_psp_message = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.h
index a9b26df14520..60acf0e423d9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.h
@@ -481,6 +481,8 @@
SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_RSMU_UNDERFLOW, ODM, inst), \
+ SRI_ARR(OPTC_UNDERFLOW_THRESHOLD, ODM, inst), \
SRI_ARR(CONTROL, VTG, inst), \
SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
SRI_ARR(OTG_GSL_CONTROL, OTG, inst), \
@@ -584,5 +586,6 @@ enum dc_status dcn42_validate_bandwidth(struct dc *dc,
enum dc_validate_mode validate_mode);
void dcn42_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
+int dcn42_get_power_profile(const struct dc_state *context);
#endif /* _DCN42_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
index fd9c24b5df53..1c04171b296c 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -93,6 +93,10 @@ static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
}
}
vmin_limit->dispclk_khz = min(dc_clk_table->entries[0].dispclk_mhz * 1000, vmin_limit->dispclk_khz);
+ /* dispclk is always fine-grain */
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels >= 2 ? 2 : 1;
+ dml_clk_table->dispclk.clk_values_khz[0] = 0;
+ dml_clk_table->dispclk.clk_values_khz[1] = dc_clk_table->entries[dc_clk_table->num_entries_per_clk.num_dispclk_levels - 1].dispclk_mhz * 1000;
}
/* dppclk */
@@ -105,6 +109,10 @@ static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
dml_clk_table->dppclk.clk_values_khz[i] = 0;
}
}
+ /* dppclk is always fine-grain */
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels >= 2 ? 2 : 1;
+ dml_clk_table->dppclk.clk_values_khz[0] = 0;
+ dml_clk_table->dppclk.clk_values_khz[1] = dc_clk_table->entries[dc_clk_table->num_entries_per_clk.num_dppclk_levels - 1].dppclk_mhz * 1000;
}
/* dtbclk */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e11e32afac6b..8fbd179a4c87 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -4437,6 +4437,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
REPLAY_GENERAL_CMD_SET_CONTINUOUSLY_RESYNC,
REPLAY_GENERAL_CMD_SET_COASTING_VTOTAL_WITHOUT_FRAME_UPDATE,
+ REPLAY_GENERAL_CMD_LIVE_CAPTURE_WITH_CVT,
};
struct dmub_alpm_auxless_data {
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
index 4d274b7034e8..7833a4fb7fbf 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
@@ -39,13 +39,14 @@ void dmub_srv_dcn42_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
void dmub_dcn42_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn42_get_fw_boot_option(dmub);
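+ /* Latch the DPIA enable bit from the current FW boot options the first time through. */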
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2639163b8ba2..1f225d0d6c44 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -896,6 +896,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x)
{
+ (void)coordinate_x;
uint32_t i;
struct fixed31_32 output;
struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
@@ -1339,6 +1340,7 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
+ (void)dividers;
uint32_t i;
struct fixed31_32 min = dc_fixpt_zero;
struct fixed31_32 max = dc_fixpt_one;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 19de72173052..c9150019aab0 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -114,6 +114,7 @@ static unsigned int calc_duration_in_us_from_v_total(
const struct mod_vrr_params *in_vrr,
unsigned int v_total)
{
+ (void)in_vrr;
unsigned int duration_in_us =
(unsigned int)(div64_u64(((unsigned long long)(v_total)
* 10000) * stream->timing.h_total,
@@ -218,6 +219,7 @@ static void update_v_total_for_static_ramp(
const struct dc_stream_state *stream,
struct mod_vrr_params *in_out_vrr)
{
+ (void)core_freesync;
unsigned int v_total = 0;
unsigned int current_duration_in_us =
calc_duration_in_us_from_v_total(
@@ -292,6 +294,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
+ (void)core_freesync;
unsigned int inserted_frame_duration_in_us = 0;
unsigned int mid_point_frames_ceil = 0;
unsigned int mid_point_frames_floor = 0;
@@ -447,6 +450,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
+ (void)core_freesync;
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
@@ -545,6 +549,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
unsigned int max_refresh_in_uhz,
struct mod_vrr_params *in_vrr)
{
+ (void)core_freesync;
if (in_vrr->state != in_config->state) {
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
@@ -946,6 +951,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
struct dc_info_packet *infopacket,
bool pack_sdp_v1_3)
{
+ (void)mod_freesync;
/* SPD info packet for FreeSync
* VTEM info packet for HdmiVRR
* Check if Freesync is supported. Return if false. If true,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 26a351a184f3..d07387a961dd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -501,6 +501,7 @@ static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
struct mod_hdcp_output *output)
{
+ (void)hdcp;
output->watchdog_timer_needed = 1;
output->watchdog_timer_delay = time;
}
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
index 52fbf2dc1899..3755a984681a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
@@ -9036,6 +9036,8 @@
// base address: 0x40
#define regODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
#define regODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_RSMU_UNDERFLOW 0x1adb
+#define regODM1_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM1_OPTC_UNDERFLOW_THRESHOLD 0x1adc
#define regODM1_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM1_OPTC_DATA_SOURCE_SELECT 0x1add
@@ -9060,6 +9062,8 @@
// base address: 0x80
#define regODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
#define regODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_RSMU_UNDERFLOW 0x1aeb
+#define regODM2_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM2_OPTC_UNDERFLOW_THRESHOLD 0x1aec
#define regODM2_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM2_OPTC_DATA_SOURCE_SELECT 0x1aed
@@ -9084,6 +9088,8 @@
// base address: 0xc0
#define regODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
#define regODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_RSMU_UNDERFLOW 0x1afb
+#define regODM3_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM3_OPTC_UNDERFLOW_THRESHOLD 0x1afc
#define regODM3_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM3_OPTC_DATA_SOURCE_SELECT 0x1afd
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a9b73f4fd466..1bbf531de5ed 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -119,6 +119,7 @@ enum pp_clock_type {
PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
+ OD_FCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
@@ -208,6 +209,7 @@ enum {
enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_SCLK_VDDC_TABLE,
PP_OD_EDIT_MCLK_VDDC_TABLE,
+ PP_OD_EDIT_FCLK_TABLE,
PP_OD_EDIT_CCLK_VDDC_TABLE,
PP_OD_EDIT_VDDC_CURVE,
PP_OD_RESTORE_DEFAULT_TABLE,
@@ -585,8 +587,61 @@ enum amdgpu_metrics_attr_id {
AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MID,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
+ AMDGPU_METRICS_ATTR_ID_LABEL_VERSION,
+ AMDGPU_METRICS_ATTR_ID_NODE_ID,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_RETIMER,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_IBC,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_IBC_2,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_VDD18_VR,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_04_HBM_B_VR,
+ AMDGPU_METRICS_ATTR_ID_NODE_TEMP_04_HBM_D_VR,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_SOCIO_A,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_SOCIO_C,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_X0,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_X1,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_HBM_B,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_HBM_D,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_04_HBM_B,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_04_HBM_D,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_HBM_B,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_HBM_D,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_HBM_B,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_HBM_D,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_11_GTA_A,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_11_GTA_C,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075_GTA_A,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075_GTA_C,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_UCIE,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAA,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAM_A,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAM_C,
+ AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FRONT,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_BACK,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_OAM7,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_IBC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_UFPGA,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_OAM1,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_0_1_HSC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_2_3_HSC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_4_5_HSC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_6_7_HSC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_IBC_HSC,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_IBC,
AMDGPU_METRICS_ATTR_ID_MAX,
};
@@ -1839,4 +1894,16 @@ enum amdgpu_xgmi_link_status {
AMDGPU_XGMI_LINK_NA = 2,
};
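+/* Flexible-array tables of temperature metric attributes; attr_count gives
+ * the number of entries in metrics_attrs[]. */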
+struct amdgpu_gpuboard_temp_metrics_v1_1 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_1 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 9e2f69663f96..dbf3ae2f5e13 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -680,6 +680,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - minimum(not available for Vega20 and Navi1x) and maximum memory
* clock labeled OD_MCLK
*
+ * - minimum and maximum fabric clock labeled OD_FCLK (SMU13)
+ *
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve. This is
* available for Vega20 and NV1X.
@@ -715,10 +717,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - First select manual using power_dpm_force_performance_level
*
* - For clock frequency setting, enter a new value by writing a
- * string that contains "s/m index clock" to the file. The index
+ * string that contains "s/m/f index clock" to the file. The index
* should be 0 to set the minimum clock and 1 to set the maximum
* clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
- * "m 1 800" will update maximum mclk to be 800Mhz. For core
+ * "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will
+ * update maximum fabric clock to be 1600Mhz. For core
* clocks on VanGogh, the string contains "p core index clock".
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800 MHz.
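As a usage illustration only (a minimal editorial sketch, not part of this patch): with the new "f" command, a userspace tool could cap the fabric clock as below. The card index and target frequency are illustrative, and power_dpm_force_performance_level must already be set to manual, as described above.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* "f 1 1600": index 1 selects the maximum, 1600 is the new MHz value */
            if (write(fd, "f 1 1600", 8) < 0)
                    perror("write");
            close(fd);

            /* commit the pending change via the "c" command handled below */
            fd = open(path, O_WRONLY);
            if (fd < 0) {
                    perror("reopen");
                    return 1;
            }
            if (write(fd, "c", 1) < 0)
                    perror("commit");
            close(fd);
            return 0;
    }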
@@ -768,6 +771,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
+ else if (*buf == 'f')
+ type = PP_OD_EDIT_FCLK_TABLE;
else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
@@ -843,9 +848,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int size = 0;
int ret;
- enum pp_clock_type od_clocks[6] = {
+ enum pp_clock_type od_clocks[] = {
OD_SCLK,
OD_MCLK,
+ OD_FCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
@@ -857,10 +863,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (ret)
return ret;
- for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
- ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
- if (ret)
- break;
+ for (clk_index = 0; clk_index < ARRAY_SIZE(od_clocks); clk_index++) {
+ amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
}
if (size == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 26b43a6276d0..8faf7de7aaa9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -47,6 +47,7 @@
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "smu_v15_0_0_ppt.h"
+#include "smu_v15_0_8_ppt.h"
#include "amd_pcie.h"
/*
@@ -802,6 +803,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(15, 0, 0):
smu_v15_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(15, 0, 8):
+ smu_v15_0_8_set_ppt_funcs(smu);
+ smu->od_enabled = true;
+ break;
default:
return -EINVAL;
}
@@ -2965,6 +2970,7 @@ int smu_get_power_limit(void *handle,
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
+ case IP_VERSION(15, 0, 8):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL, NULL, NULL);
@@ -3056,6 +3062,8 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
clk_type = SMU_OD_MCLK; break;
+ case OD_FCLK:
+ clk_type = SMU_OD_FCLK; break;
case OD_VDDC_CURVE:
clk_type = SMU_OD_VDDC_CURVE; break;
case OD_RANGE:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h
new file mode 100644
index 000000000000..6993d866183d
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu15_driver_if_v15_0_8.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_DRIVER_IF_H
+#define SMU_15_0_8_DRIVER_IF_H
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0,
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ /* 50 Kbits/s not supported anymore! */
+ UNSUPPORTED_1,
+ /* 100 Kbits/s */
+ I2C_SPEED_STANDARD_100K,
+ /* 400 Kbits/s */
+ I2C_SPEED_FAST_400K,
+ /* 1 Mbits/s (in fast mode) */
+ I2C_SPEED_FAST_PLUS_1M,
+ /* 1 Mbits/s (in high speed mode) not supported anymore!*/
+ UNSUPPORTED_2,
+ /* 2.3 Mbits/s not supported anymore! */
+ UNSUPPORTED_3,
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+/* bit should be 0 for read, 1 for write */
+#define CMDCONFIG_READWRITE_BIT 2
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+/* 64-bit register offsets for the PPSMC_MSG_McaBankDumpDW and PPSMC_MSG_McaBankCeDumpDW messages.
+ * E.g., to read MCA_BANK_OFFSET_SYND for CE index, call PPSMC_MSG_McaBankCeDumpDW twice:
+ * ((index << 16) + MCA_BANK_OFFSET_SYND*8) argument for the 1st DWORD, and
+ * ((index << 16) + MCA_BANK_OFFSET_SYND*8 + 4) argument for the 2nd DWORD */
+typedef enum {
+ MCA_BANK_OFFSET_CTL = 0,
+ MCA_BANK_OFFSET_STATUS = 1,
+ MCA_BANK_OFFSET_ADDR = 2,
+ MCA_BANK_OFFSET_MISC = 3,
+ MCA_BANK_OFFSET_IPID = 5,
+ MCA_BANK_OFFSET_SYND = 6,
+ MCA_BANK_OFFSET_MAX = 16,
+} MCA_BANK_OFFSET_e;
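To make the arithmetic above concrete, a minimal sketch (editorial, not from the patch; the helper name is invented) of deriving the two message arguments for the SYND register of CE bank `index`:

    #include <stdint.h>

    static inline void mca_ce_synd_args(uint32_t index,
                                        uint32_t *arg_1st_dw, uint32_t *arg_2nd_dw)
    {
            uint32_t base = (index << 16) + MCA_BANK_OFFSET_SYND * 8;

            *arg_1st_dw = base;     /* low DWORD of the 64-bit register */
            *arg_2nd_dw = base + 4; /* high DWORD */
    }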
+
+/* Firmware MP1 AID MCA Error Codes stored in MCA_MP_MP1:MCMP1_SYNDT0 error information */
+typedef enum {
+ /* MMHUB */
+ CODE_DAGB0 = 0,
+ CODE_DAGB1 = 1,
+ CODE_DAGB2 = 2,
+ CODE_DAGB3 = 3,
+ CODE_DAGB4 = 4,
+ CODE_EA0 = 5,
+ CODE_EA1 = 6,
+ CODE_EA2 = 7,
+ CODE_EA3 = 8,
+ CODE_EA4 = 9,
+ CODE_UTCL2_ROUTER = 10,
+ CODE_VML2 = 11,
+ CODE_VML2_WALKER = 12,
+ CODE_MMCANE = 13,
+
+ /* VCN VCPU */
+ CODE_VIDD = 14,
+ CODE_VIDV = 15,
+ /* VCN JPEG */
+ CODE_JPEG0S = 16,
+ CODE_JPEG0D = 17,
+ CODE_JPEG1S = 18,
+ CODE_JPEG1D = 19,
+ CODE_JPEG2S = 20,
+ CODE_JPEG2D = 21,
+ CODE_JPEG3S = 22,
+ CODE_JPEG3D = 23,
+ CODE_JPEG4S = 24,
+ CODE_JPEG4D = 25,
+ CODE_JPEG5S = 26,
+ CODE_JPEG5D = 27,
+ CODE_JPEG6S = 28,
+ CODE_JPEG6D = 29,
+ CODE_JPEG7S = 30,
+ CODE_JPEG7D = 31,
+ /* VCN MMSCH */
+ CODE_MMSCHD = 32,
+
+ /* SDMA */
+ CODE_SDMA0 = 33,
+ CODE_SDMA1 = 34,
+ CODE_SDMA2 = 35,
+ CODE_SDMA3 = 36,
+
+ /* SOC */
+ CODE_HDP = 37,
+ CODE_ATHUB = 38,
+ CODE_IH = 39,
+ CODE_XHUB_POISON = 40,
+ CODE_SMN_SLVERR = 41,
+ CODE_WDT = 42,
+
+ CODE_UNKNOWN = 43,
+ CODE_DMA = 44,
+ CODE_COUNT = 45,
+} ERR_CODE_e;
+
+/* Firmware MP5 XCD MCA Error Codes stored in MCA_MP_MP5:MCMP5_SYNDT0 error information */
+typedef enum {
+ /* SH POISON FED */
+ SH_FED_CODE = 0,
+ /* GCEA Pin UE_ERR regs */
+ GCEA_CODE = 1,
+ SQ_CODE = 2,
+ LDS_CODE = 3,
+ GDS_CODE = 4,
+ SP0_CODE = 5,
+ SP1_CODE = 6,
+ TCC_CODE = 7,
+ TCA_CODE = 8,
+ TCX_CODE = 9,
+ CPC_CODE = 10,
+ CPF_CODE = 11,
+ CPG_CODE = 12,
+ SPI_CODE = 13,
+ RLC_CODE = 14,
+ /* GCEA Pin, UE_EDC regs */
+ SQC_CODE = 15,
+ TA_CODE = 16,
+ TD_CODE = 17,
+ TCP_CODE = 18,
+ TCI_CODE = 19,
+ /* GC Router */
+ GC_ROUTER_CODE = 20,
+ VML2_CODE = 21,
+ VML2_WALKER_CODE = 22,
+ ATCL2_CODE = 23,
+ GC_CANE_CODE = 24,
+
+ /* SOC error codes 41-43 are common with ERR_CODE_e */
+ MP5_CODE_SMN_SLVERR = CODE_SMN_SLVERR,
+ MP5_CODE_UNKNOWN = CODE_UNKNOWN,
+} GC_ERROR_CODE_e;
+
+/* SW I2C Command Table */
+typedef struct {
+ /* Return data for read. Data to send for write*/
+ uint8_t ReadWriteData;
+ /* Includes whether associated command should have a stop or restart command,
+ * and is a read or write */
+ uint8_t CmdConfig;
+} SwI2cCmd_t;
+
+/* SW I2C Request Table */
+typedef struct {
+ /* CKSVII2C0(0) or CKSVII2C1(1) */
+ uint8_t I2CcontrollerPort;
+ /* Use I2cSpeed_e to indicate speed to select */
+ uint8_t I2CSpeed;
+ /* Slave address of device */
+ uint8_t SlaveAddress;
+ /* Number of commands */
+ uint8_t NumCmds;
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t;
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+ uint32_t Spare[8];
+ /* SMU internal use */
+ uint32_t MmHubPadding[8];
+} SwI2cRequestExternal_t;
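For orientation, a hedged sketch (editorial, not part of the patch; it assumes this header is included and uses a hypothetical slave address) of filling one of these request tables for a single-byte read:

    #include <string.h>

    static void build_one_byte_read(SwI2cRequest_t *req)
    {
            memset(req, 0, sizeof(*req));
            req->I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
            req->I2CSpeed = I2C_SPEED_FAST_400K;
            req->SlaveAddress = 0x50;   /* hypothetical device address */
            req->NumCmds = 1;
            /* READWRITE bit left clear selects a read; STOP ends the transfer */
            req->SwI2cCmds[0].CmdConfig = CMDCONFIG_STOP_MASK;
    }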
+
+typedef enum {
+ PPCLK_UCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+/* TODO confirm if this is used in MI300 PPSMC_MSG_SetUclkDpmMode */
+typedef enum {
+ UCLK_DPM_MODE_BANDWIDTH,
+ UCLK_DPM_MODE_LATENCY,
+} UCLK_DPM_MODE_e;
+
+typedef struct {
+ /* 2 AVFS.PSM chains */
+ uint16_t AvgPsmCount_Chain0[13];
+ uint16_t AvgPsmCount_Chain1[15];
+ uint16_t MinPsmCount_Chain0[13];
+ uint16_t MinPsmCount_Chain1[15];
+ float MaxTemperature;
+
+ /* For voltage conversions, these are the array indexes
+ * 0:SOCIO
+ * 1:065_UCIE
+ * 2:075_UCIE
+ * 3:11_GTA
+ * 4:075_GTA */
+ float MinPsmVoltage[5];
+ float AvgPsmVoltage[5];
+} AvfsDebugTableMid_t;
+
+typedef struct {
+ /* 7 AVFS.PSM chains - not including TRO */
+ uint16_t AvgPsmCount_Chain0[15];
+ uint16_t AvgPsmCount_Chain1[15];
+ uint16_t AvgPsmCount_Chain2[13];
+ uint16_t AvgPsmCount_Chain3[13];
+ uint16_t AvgPsmCount_Chain4[15];
+ uint16_t AvgPsmCount_Chain5[15];
+ uint16_t AvgPsmCount_Chain6[5];
+ uint16_t MinPsmCount_Chain0[15];
+ uint16_t MinPsmCount_Chain1[15];
+ uint16_t MinPsmCount_Chain2[13];
+ uint16_t MinPsmCount_Chain3[13];
+ uint16_t MinPsmCount_Chain4[15];
+ uint16_t MinPsmCount_Chain5[15];
+ uint16_t MinPsmCount_Chain6[5];
+ float MaxTemperature;
+
+ /* For voltage conversions, these are the array indexes
+ * 0:VDDX */
+ float MinPsmVoltage;
+ float AvgPsmVoltage;
+} AvfsDebugTableAid_t;
+
+typedef struct {
+ /* 0-27 GFX, 28-29 SOC */
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableXcd_t;
+
+/* Defines used for IH-based thermal interrupts to GFX driver - A/X only */
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_VFFLR_INT 0xA
+
+/* thermal over-temp mask defines for IH interrupt to host */
+#define THROTTLER_PROCHOT_BIT 0
+#define THROTTLER_RESERVED 1
+/* AID, XCD, CCD throttling */
+#define THROTTLER_THERMAL_SOCKET_BIT 2
+/* VRHOT */
+#define THROTTLER_THERMAL_VR_BIT 3
+#define THROTTLER_THERMAL_HBM_BIT 4
+/* UEs are always reported, set flag to 0 to prevent clearing of UEs */
+#define ClearMcaOnRead_UE_FLAG_MASK 0x1
+/* Enable CE logging and clearing to driver */
+#define ClearMcaOnRead_CE_POLL_MASK 0x2
+/* AID MMHUB client IP CE Logging and clearing */
+#define ClearMcaOnRead_MMHUB_POLL_MASK 0x4
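A short illustration (editorial sketch, not in the patch): a ClearMcaOnRead argument that enables CE polling and MMHUB CE handling while leaving the UE flag at 0, so UEs are reported but never cleared, as the comments above require:

    #include <stdint.h>

    static inline uint32_t clear_mca_default_policy(void)
    {
            return ClearMcaOnRead_CE_POLL_MASK | ClearMcaOnRead_MMHUB_POLL_MASK;
    }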
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h
new file mode 100644
index 000000000000..a3401c4cc20b
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_pmfw.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_PMFW_H
+#define SMU_15_0_8_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+#define NUM_GFX_P2S_TABLES 8
+#define NUM_PSM_DIDT_THRESHOLDS 3
+#define NUM_XCD_XVMIN_VMIN_THRESHOLDS 3
+
+#define PRODUCT_MODEL_NUMBER_LEN 20
+#define PRODUCT_NAME_LEN 64
+#define PRODUCT_SERIAL_LEN 20
+#define PRODUCT_MANUFACTURER_NAME_LEN 32
+#define PRODUCT_FRU_ID_LEN 32
+
+//Feature ID list
+#define FEATURE_ID_DATA_CALCULATION 1
+#define FEATURE_ID_DPM_FCLK 2
+#define FEATURE_ID_DPM_GFXCLK 3
+#define FEATURE_ID_DPM_SPARE_4 4
+#define FEATURE_ID_DPM_SPARE_5 5
+#define FEATURE_ID_DPM_UCLK 6
+#define FEATURE_ID_DPM_SPARE_7 7
+#define FEATURE_ID_DPM_XGMI 8
+#define FEATURE_ID_DS_FCLK 9
+#define FEATURE_ID_DS_GFXCLK 10
+#define FEATURE_ID_DS_LCLK 11
+#define FEATURE_ID_DS_MP0CLK 12
+#define FEATURE_ID_DS_MP1CLK 13
+#define FEATURE_ID_DS_MPIOCLK 14
+#define FEATURE_ID_DS_SOCCLK 15
+#define FEATURE_ID_DS_VCN 16
+#define FEATURE_ID_PPT 17
+#define FEATURE_ID_TDC 18
+#define FEATURE_ID_THERMAL 19
+#define FEATURE_ID_SOC_PCC 20
+#define FEATURE_ID_PROCHOT 21
+#define FEATURE_ID_XVMIN0_VMIN_AID 22
+#define FEATURE_ID_XVMIN1_DD_AID 23
+#define FEATURE_ID_XVMIN0_VMIN_XCD 24
+#define FEATURE_ID_XVMIN1_DD_XCD 25
+#define FEATURE_ID_FW_CTF 26
+#define FEATURE_ID_MGCG 27
+#define FEATURE_ID_PSI7 28
+#define FEATURE_ID_XGMI_PER_LINK_PWR_DOWN 29
+#define FEATURE_ID_SOC_DC_RTC 30
+#define FEATURE_ID_GFX_DC_RTC 31
+#define FEATURE_ID_DVM_MIN_PSM 32
+#define FEATURE_ID_PRC 33
+#define FEATURE_ID_PSM_DIDT 34
+#define FEATURE_ID_PIT 35
+#define FEATURE_ID_DVO 36
+#define FEATURE_ID_XVMIN_CLKSTOP_DS 37
+#define FEATURE_ID_HBM_THROTTLE_CTRL 38
+#define FEATURE_ID_DPM_GL2CLK 39
+#define FEATURE_ID_GC_CAC_EDC 40
+#define FEATURE_ID_DS_DMABECLK 41
+#define FEATURE_ID_DS_MPIFOECLK 42
+#define FEATURE_ID_DS_MPRASCLK 43
+#define FEATURE_ID_DS_MPNHTCLK 44
+#define FEATURE_ID_DS_FIOCLK 45
+#define FEATURE_ID_DS_DXIOCLK 46
+#define FEATURE_ID_PCC 47
+#define FEATURE_ID_OCP 48
+#define FEATURE_ID_TRO 49
+#define FEATURE_ID_GL2_CAC_EDC 50
+#define FEATURE_ID_SPARE_51 51
+#define FEATURE_ID_GL2_CGCG 52
+#define FEATURE_ID_XCAC 53
+#define FEATURE_ID_DS_GL2CLK 54
+#define FEATURE_ID_FCS_VIN_PCC 55
+#define FEATURE_ID_FCS_VDDX_OCP_WARN 56
+#define FEATURE_ID_FCS_PWRBRK 57
+#define FEATURE_ID_DF_CSTATE 58
+#define FEATURE_ID_ARO 59
+#define FEATURE_ID_PS_PsPowerLimit 60
+#define FEATURE_ID_PS_PsPowerFloor 61
+#define FEATURE_ID_OCPWARNRC 62
+#define FEATURE_ID_XGMI_FOLDING 63
+#define FEATURE_ID_SMU_CG 64
+#define NUM_FEATURES 65
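These IDs are bit positions in a 64-bit feature word. A minimal sketch (editorial; the helper name is invented, and FEATURE_MASK() itself is added in smu_v15_0.h further down in this diff) of testing one:

    #include <stdbool.h>
    #include <stdint.h>

    #define FEATURE_MASK(feature) (1ULL << feature)

    static inline bool smu_feature_enabled(uint64_t enabled_mask, int feature_id)
    {
            return (enabled_mask & FEATURE_MASK(feature_id)) != 0;
    }

    /* e.g. smu_feature_enabled(caps, FEATURE_ID_DPM_FCLK) */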
+
+//MGCG Feature ID List
+#define WAFL_CG 0
+#define SMU_FUSE_CG_DEEPSLEEP 1
+#define SMUIO_CG 2
+#define RSMU_MGCG 3
+#define SMU_CLK_MGCG 4
+#define MP5_CG 5
+#define UMC_CG 6
+#define WAFL0_CLK 7
+#define WAFL1_CLK 8
+#define VCN_MGCG 9
+#define GL2_MGCG 10
+#define MGCG_NUM_FEATURES 11
+
+/* enum for MPIO PCIe gen speed msgs */
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN6,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN6_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ GFX_GUARDBAND_OFFSET_0,
+ GFX_GUARDBAND_OFFSET_1,
+ GFX_GUARDBAND_OFFSET_2,
+ GFX_GUARDBAND_OFFSET_3,
+ GFX_GUARDBAND_OFFSET_4,
+ GFX_GUARDBAND_OFFSET_5,
+ GFX_GUARDBAND_OFFSET_6,
+ GFX_GUARDBAND_OFFSET_7,
+ GFX_GUARDBAND_OFFSET_COUNT
+} GFX_GUARDBAND_OFFSET_e;
+
+typedef enum {
+ GFX_DVM_MARGINHI_0,
+ GFX_DVM_MARGINHI_1,
+ GFX_DVM_MARGINHI_2,
+ GFX_DVM_MARGINHI_3,
+ GFX_DVM_MARGINHI_4,
+ GFX_DVM_MARGINHI_5,
+ GFX_DVM_MARGINHI_6,
+ GFX_DVM_MARGINHI_7,
+ GFX_DVM_MARGINLO_0,
+ GFX_DVM_MARGINLO_1,
+ GFX_DVM_MARGINLO_2,
+ GFX_DVM_MARGINLO_3,
+ GFX_DVM_MARGINLO_4,
+ GFX_DVM_MARGINLO_5,
+ GFX_DVM_MARGINLO_6,
+ GFX_DVM_MARGINLO_7,
+ GFX_DVM_MARGIN_COUNT
+} GFX_DVM_MARGIN_e;
+
+typedef enum {
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum {
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_PLANE_VDDCR_X0_TEMP,
+ SVI_PLANE_VDDCR_X1_TEMP,
+
+ SVI_PLANE_VDDIO_HBM_B_TEMP,
+ SVI_PLANE_VDDIO_HBM_D_TEMP,
+ SVI_PLANE_VDDIO_04_HBM_B_TEMP,
+ SVI_PLANE_VDDIO_04_HBM_D_TEMP,
+ SVI_PLANE_VDDCR_HBM_B_TEMP,
+ SVI_PLANE_VDDCR_HBM_D_TEMP,
+ SVI_PLANE_VDDCR_075_HBM_B_TEMP,
+ SVI_PLANE_VDDCR_075_HBM_D_TEMP,
+
+ SVI_PLANE_VDDIO_11_GTA_A_TEMP,
+ SVI_PLANE_VDDIO_11_GTA_C_TEMP,
+ SVI_PLANE_VDDAN_075_GTA_A_TEMP,
+ SVI_PLANE_VDDAN_075_GTA_C_TEMP,
+
+ SVI_PLANE_VDDCR_075_UCIE_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAA_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAM_A_TEMP,
+ SVI_PLANE_VDDIO_065_UCIEAM_C_TEMP,
+
+ SVI_PLANE_VDDCR_SOCIO_A_TEMP,
+ SVI_PLANE_VDDCR_SOCIO_C_TEMP,
+
+ SVI_PLANE_VDDAN_075_TEMP,
+ SVI_MAX_TEMP_ENTRIES, //21
+} SVI_TEMP_e;
+
+typedef enum {
+ SYSTEM_POWER_UBB_POWER,
+ SYSTEM_POWER_UBB_POWER_THRESHOLD,
+ SYSTEM_POWER_MAX_ENTRIES_WO_RESERVED,
+ SYSTEM_POWER_MAX_ENTRIES = 4
+} SYSTEM_POWER_e;
+
+#define SMU_METRICS_TABLE_VERSION 0xF
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t HbmTemperature[12];
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t HbmTemperatureAcc[12];
+ uint32_t MidTemperature[2];
+ uint32_t AidTemperature[2];
+ uint32_t XcdTemperature[8];
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency[2];
+ uint32_t UclkFrequency[2];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint32_t GfxclkFrequency[8];
+ uint32_t SocclkFrequency[2];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[2];
+
+ //XGMI:
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc;
+ uint64_t XgmiWriteBandwidthAcc;
+
+ //ACTIVITY:
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[2];
+
+ //THROTTLERS
+ uint64_t ProchotResidencyAcc;
+ uint64_t PptResidencyAcc;
+ uint64_t SocketThmResidencyAcc;
+ uint64_t VrThmResidencyAcc;
+ uint64_t HbmThmResidencyAcc;
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[2];
+ uint64_t PCIeL0ToRecoveryCountAcc;
+ uint64_t PCIenReplayAAcc;
+ uint64_t PCIenReplayARolloverCountAcc;
+ uint64_t PCIeNAKSentCountAcc;
+ uint64_t PCIeNAKReceivedCountAcc;
+ uint64_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[40];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //NVML-Parity: Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+} MetricsTable_t;
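The *_Acc fields are monotonically accumulating counters. Assuming the usual consumption model of differencing two snapshots against AccumulationCounter (an editorial sketch, not a documented contract; wraparound handling omitted):

    #include <stdint.h>

    static uint64_t avg_socket_gfx_busy(const MetricsTable_t *prev,
                                        const MetricsTable_t *cur)
    {
            uint64_t n = cur->AccumulationCounter - prev->AccumulationCounter;

            /* difference the accumulators over the same sampling window */
            return n ? (cur->SocketGfxBusyAcc - prev->SocketGfxBusyAcc) / n : 0;
    }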
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
+
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; //Defaults to 0.
+ uint16_t NodeIdentifier;
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, 21 entries; unused fields are set to 0xFFFF
+ int16_t spare[7];
+
+ //NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+
+ uint16_t SystemPower[SYSTEM_POWER_MAX_ENTRIES]; // UBB Current Power and Power Threshold
+} SystemMetricsTable_t;
+#pragma pack(pop)
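Per the field comments above, unused sensor slots hold 0xFFFF, which reads as -1 through int16_t; a consumer can compare through uint16_t (editorial sketch, with the caveat that a genuine -1 degC reading would be indistinguishable from the sentinel):

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool system_temp_valid(int16_t t)
    {
            return (uint16_t)t != 0xFFFF; /* 0xFFFF marks an unused slot */
    }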
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
+} VfMetricsTable_t;
+
+/* FRU product information */
+typedef struct __attribute__((aligned(4))) {
+ uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
+ uint8_t Name[PRODUCT_NAME_LEN];
+ uint8_t Serial[PRODUCT_SERIAL_LEN];
+ uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
+ uint8_t FruId[PRODUCT_FRU_ID_LEN];
+} FRUProductInfo_t;
+
+#define SMU_STATIC_METRICS_TABLE_VERSION 0x1
+
+#pragma pack(push, 4)
+typedef struct {
+ //FRU PRODUCT INFO
+ FRUProductInfo_t ProductInfo; //from i2c
+
+ //POWER
+ uint32_t MaxSocketPowerLimit;
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t MaxFclkFrequency;
+ uint32_t MinFclkFrequency;
+ uint32_t MaxGl2clkFrequency;
+ uint32_t MinGl2clkFrequency;
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+
+ //CTF limits
+ uint32_t CTFLimit_MID;
+ uint32_t CTFLimit_AID;
+ uint32_t CTFLimit_XCD;
+ uint32_t CTFLimit_HBM;
+
+ //Thermal Throttling limits
+ uint32_t ThermalLimit_MID;
+ uint32_t ThermalLimit_AID;
+ uint32_t ThermalLimit_XCD;
+ uint32_t ThermalLimit_HBM;
+
+ //PSNs
+ uint64_t PublicSerialNumber_MID[2];
+ uint64_t PublicSerialNumber_AID[2];
+ uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
+
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h
new file mode 100644
index 000000000000..7ffb445f4c0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v15_0_8_ppsmc.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_15_0_8_PPSMC_H
+#define SMU_15_0_8_PPSMC_H
+
+/* SMU Response Codes */
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+/* Message Definitions */
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_GetMetricsVersion 0x6
+#define PPSMC_MSG_GetMetricsTable 0x7
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x8
+#define PPSMC_MSG_SetDriverDramAddr 0x9 //ARG0: low address, ARG1: high address
+#define PPSMC_MSG_SetToolsDramAddr 0xA //ARG0: low address, ARG1: high address
+//#define PPSMC_MSG_SetSystemVirtualDramAddr 0xB
+#define PPSMC_MSG_SetSoftMaxByFreq 0xC
+#define PPSMC_MSG_SetPptLimit 0xD
+#define PPSMC_MSG_GetPptLimit 0xE
+#define PPSMC_MSG_DramLogSetDramAddr 0xF //ARG0: low address, ARG1: high address, ARG2: size
+#define PPSMC_MSG_HeavySBR 0x10
+#define PPSMC_MSG_DFCstateControl 0x11
+#define PPSMC_MSG_GfxDriverResetRecovery 0x12
+#define PPSMC_MSG_TriggerVFFLR 0x13
+#define PPSMC_MSG_SetSoftMinGfxClk 0x14
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x15
+#define PPSMC_MSG_PrepareForDriverUnload 0x16
+#define PPSMC_MSG_QueryValidMcaCount 0x17
+#define PPSMC_MSG_McaBankDumpDW 0x18
+#define PPSMC_MSG_ClearMcaOnRead 0x19
+#define PPSMC_MSG_QueryValidMcaCeCount 0x1A
+#define PPSMC_MSG_McaBankCeDumpDW 0x1B
+#define PPSMC_MSG_SelectPLPDMode 0x1C
+#define PPSMC_MSG_SetThrottlingPolicy 0x1D
+#define PPSMC_MSG_ResetSDMA 0x1E
+#define PPSMC_MSG_GetRasTableVersion 0x1F
+#define PPSMC_MSG_GetRmaStatus 0x20
+#define PPSMC_MSG_GetBadPageCount 0x21
+#define PPSMC_MSG_GetBadPageMcaAddress 0x22
+#define PPSMC_MSG_GetBadPagePaAddress 0x23
+#define PPSMC_MSG_SetTimestamp 0x24
+#define PPSMC_MSG_GetTimestamp 0x25
+#define PPSMC_MSG_GetRasPolicy 0x26
+#define PPSMC_MSG_GetBadPageIpIdLoHi 0x27
+#define PPSMC_MSG_EraseRasTable 0x28
+#define PPSMC_MSG_GetStaticMetricsTable 0x29
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x2A
+#define PPSMC_MSG_GetBadPageSeverity 0x2B
+#define PPSMC_MSG_GetSystemMetricsTable 0x2C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x2D
+#define PPSMC_MSG_ResetVCN 0x2E
+#define PPSMC_MSG_SetFastPptLimit 0x2F
+#define PPSMC_MSG_GetFastPptLimit 0x30
+#define PPSMC_MSG_SetSoftMinGl2clk 0x31
+#define PPSMC_MSG_SetSoftMaxGl2clk 0x32
+#define PPSMC_MSG_SetSoftMinFclk 0x33
+#define PPSMC_MSG_SetSoftMaxFclk 0x34
+#define PPSMC_Message_Count 0x35
+
+/* PPSMC Reset Types for driver msg argument */
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+/* PLPD modes */
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 584c4cfd0c16..636ff90923d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -42,8 +42,10 @@
__SMU_DUMMY_MAP(SetPptLimit), \
__SMU_DUMMY_MAP(SetDriverDramAddrHigh), \
__SMU_DUMMY_MAP(SetDriverDramAddrLow), \
+ __SMU_DUMMY_MAP(SetDriverDramAddr), \
__SMU_DUMMY_MAP(SetToolsDramAddrHigh), \
__SMU_DUMMY_MAP(SetToolsDramAddrLow), \
+ __SMU_DUMMY_MAP(SetToolsDramAddr), \
__SMU_DUMMY_MAP(TransferTableSmu2Dram), \
__SMU_DUMMY_MAP(TransferTableDram2Smu), \
__SMU_DUMMY_MAP(UseDefaultPPTable), \
@@ -292,7 +294,12 @@
__SMU_DUMMY_MAP(AllowZstates), \
__SMU_DUMMY_MAP(GetSmartShiftStatus), \
__SMU_DUMMY_MAP(EnableLSdma), \
- __SMU_DUMMY_MAP(DisableLSdma),
+ __SMU_DUMMY_MAP(DisableLSdma), \
+ __SMU_DUMMY_MAP(InitializeGfx), \
+ __SMU_DUMMY_MAP(SetSoftMaxFclk), \
+ __SMU_DUMMY_MAP(SetSoftMaxGl2clk), \
+ __SMU_DUMMY_MAP(SetSoftMinGl2clk), \
+ __SMU_DUMMY_MAP(GetSystemMetricsVersion),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -324,6 +331,7 @@ enum smu_clk_type {
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
+ SMU_OD_FCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
SMU_OD_VDDGFX_OFFSET,
@@ -334,6 +342,7 @@ enum smu_clk_type {
SMU_OD_FAN_MINIMUM_PWM,
SMU_OD_FAN_ZERO_RPM_ENABLE,
SMU_OD_FAN_ZERO_RPM_STOP_TEMP,
+ SMU_GL2CLK,
SMU_CLK_COUNT,
};
@@ -472,6 +481,14 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
__SMU_DUMMY_MAP(FAN_ABNORMAL), \
__SMU_DUMMY_MAP(PIT), \
+ __SMU_DUMMY_MAP(DS_DMABECLK), \
+ __SMU_DUMMY_MAP(DS_MPIFOECLK), \
+ __SMU_DUMMY_MAP(DS_MPRASCLK), \
+ __SMU_DUMMY_MAP(DS_MPNHTCLK), \
+ __SMU_DUMMY_MAP(DS_FIOCLK), \
+ __SMU_DUMMY_MAP(DS_DXIOCLK), \
+ __SMU_DUMMY_MAP(DS_GL2CLK), \
+ __SMU_DUMMY_MAP(DPM_GL2CLK), \
__SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
index 09743ccd0d15..e6fd8be2cc4a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
@@ -41,7 +41,10 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
-#define MAX_DPM_LEVELS 16
+#define SMU15_DRIVER_IF_VERSION_SMU_V15_0_8 0x007D0000
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
#define MAX_PCIE_CONF 3
#define SMU15_TOOL_SIZE 0x19000
@@ -65,45 +68,28 @@ struct smu_15_0_max_sustainable_clocks {
uint32_t soc_clock;
};
-struct smu_15_0_dpm_clk_level {
- bool enabled;
- uint32_t value;
-};
-
-struct smu_15_0_dpm_table {
- uint32_t min; /* MHz */
- uint32_t max; /* MHz */
- uint32_t count;
- bool is_fine_grained;
- struct smu_15_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
-};
-
-struct smu_15_0_pcie_table {
- uint8_t pcie_gen[MAX_PCIE_CONF];
- uint8_t pcie_lane[MAX_PCIE_CONF];
- uint16_t clk_freq[MAX_PCIE_CONF];
- uint32_t num_of_link_levels;
-};
-
struct smu_15_0_dpm_tables {
- struct smu_15_0_dpm_table soc_table;
- struct smu_15_0_dpm_table gfx_table;
- struct smu_15_0_dpm_table uclk_table;
- struct smu_15_0_dpm_table eclk_table;
- struct smu_15_0_dpm_table vclk_table;
- struct smu_15_0_dpm_table dclk_table;
- struct smu_15_0_dpm_table dcef_table;
- struct smu_15_0_dpm_table pixel_table;
- struct smu_15_0_dpm_table display_table;
- struct smu_15_0_dpm_table phy_table;
- struct smu_15_0_dpm_table fclk_table;
- struct smu_15_0_pcie_table pcie_table;
+ struct smu_dpm_table soc_table;
+ struct smu_dpm_table gfx_table;
+ struct smu_dpm_table uclk_table;
+ struct smu_dpm_table eclk_table;
+ struct smu_dpm_table vclk_table;
+ struct smu_dpm_table dclk_table;
+ struct smu_dpm_table dcef_table;
+ struct smu_dpm_table pixel_table;
+ struct smu_dpm_table display_table;
+ struct smu_dpm_table phy_table;
+ struct smu_dpm_table fclk_table;
+ struct smu_pcie_table pcie_table;
+ struct smu_dpm_table gl2_table;
};
struct smu_15_0_dpm_context {
struct smu_15_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
+ uint32_t board_volt;
};
enum smu_15_0_power_state {
@@ -118,6 +104,7 @@ struct smu_15_0_power_context {
uint32_t power_source;
uint8_t in_power_limit_boost_mode;
enum smu_15_0_power_state power_state;
+ atomic_t throttle_status;
};
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
@@ -197,7 +184,7 @@ int smu_v15_0_set_power_source(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_15_0_dpm_table *single_dpm_table);
+ struct smu_dpm_table *single_dpm_table);
int smu_v15_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 12b052d920f5..7ca8fdd23206 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -262,7 +262,6 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 2c20624caca4..ac5e44dff6c9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -101,7 +101,6 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a3b755c61b1f..be9a7a32de99 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2466,4 +2466,6 @@ void smu_v13_0_reset_custom_level(struct smu_context *smu)
pstate_table->uclk_pstate.custom.max = 0;
pstate_table->gfxclk_pstate.custom.min = 0;
pstate_table->gfxclk_pstate.custom.max = 0;
+ pstate_table->fclk_pstate.custom.min = 0;
+ pstate_table->fclk_pstate.custom.max = 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 59720372fc17..b414a74d29fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -59,6 +59,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min, int32_t *max);
+
static const struct smu_feature_bits smu_v13_0_0_dpm_features = {
.bits = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1043,8 +1047,35 @@ static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
+ int32_t min_value, max_value;
+ bool feature_enabled;
- return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_FAN_CURVE_BIT:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ if (feature_enabled) {
+ smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+ }
+ break;
+ default:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ break;
+ }
+
+out:
+ return feature_enabled;
}
static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index bc361fee5777..475541189782 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -461,6 +461,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 4 && fw_ver >= 0x04557100) ||
(pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -1199,6 +1200,7 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
@@ -1216,6 +1218,12 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
+ pstate_table->fclk_pstate.curr.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.curr.max = SMU_DPM_TABLE_MAX(fclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
+
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
@@ -1394,14 +1402,22 @@ static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
- return 0;
+ return -EOPNOTSUPP;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
pstate_table->uclk_pstate.curr.max);
break;
+ case SMU_OD_FCLK:
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT))
+ return -EOPNOTSUPP;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_FCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->fclk_pstate.curr.min,
+ pstate_table->fclk_pstate.curr.max);
+ break;
case SMU_SCLK:
case SMU_GFXCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
@@ -2043,7 +2059,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
int ret = 0;
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
- clk_type != SMU_UCLK)
+ clk_type != SMU_UCLK && clk_type != SMU_FCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -2084,6 +2100,15 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
pstate_table->uclk_pstate.curr.max = max;
}
+ if (clk_type == SMU_FCLK) {
+ if (max == pstate_table->fclk_pstate.curr.max)
+ return 0;
+
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, 0, max, false);
+ if (!ret)
+ pstate_table->fclk_pstate.curr.max = max;
+ }
+
return ret;
}
@@ -2125,6 +2150,8 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
uint32_t min_clk;
uint32_t max_clk;
@@ -2205,6 +2232,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
pstate_table->uclk_pstate.custom.max = input[1];
}
break;
+ case PP_OD_EDIT_FCLK_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu,
+ SMU_FEATURE_DPM_FCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "FCLK limits setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min FCLK level is not supported\n");
+ return -EOPNOTSUPP;
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum FCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->fclk_pstate.custom.max =
+ pstate_table->fclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->fclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
@@ -2224,14 +2285,27 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
- min_clk = SMU_DPM_TABLE_MIN(
- &dpm_context->dpm_tables.uclk_table);
- max_clk = SMU_DPM_TABLE_MAX(
- &dpm_context->dpm_tables.uclk_table);
- ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_UCLK, min_clk, max_clk, false);
- if (ret)
- return ret;
+ if (SMU_DPM_TABLE_MAX(uclk_table) !=
+ pstate_table->uclk_pstate.curr.max) {
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.uclk_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
+ ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
+ SMU_UCLK, min_clk,
+ max_clk, false);
+ if (ret)
+ return ret;
+ }
+
+ if (SMU_DPM_TABLE_MAX(fclk_table) !=
+ pstate_table->fclk_pstate.curr.max) {
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.fclk_table);
+ ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
+ SMU_FCLK, min_clk,
+ max_clk, false);
+ if (ret)
+ return ret;
+ }
smu_v13_0_reset_custom_level(smu);
}
break;
@@ -2258,6 +2332,16 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
+ if (pstate_table->fclk_pstate.custom.max) {
+ min_clk = pstate_table->fclk_pstate.curr.min;
+ max_clk = pstate_table->fclk_pstate.custom.max;
+ ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
+ SMU_FCLK, min_clk,
+ max_clk, false);
+ if (ret)
+ return ret;
+ }
+
if (!pstate_table->uclk_pstate.custom.max)
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 88e0d99b8ad2..fd0b6215364f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -59,6 +59,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min, int32_t *max);
+
static const struct smu_feature_bits smu_v13_0_7_dpm_features = {
.bits = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1053,8 +1057,35 @@ static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
+ int32_t min_value, max_value;
+ bool feature_enabled;
- return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_FAN_CURVE_BIT:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ if (feature_enabled) {
+ smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+ }
+ break;
+ default:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ break;
+ }
+
+out:
+ return feature_enabled;
}
static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index cec2df1ad0af..e38354c694c9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -284,7 +284,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_info(adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 73762d9b5969..c3ebfac062a7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -56,6 +56,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min, int32_t *max);
+
static const struct smu_feature_bits smu_v14_0_2_dpm_features = {
.bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
@@ -922,8 +926,35 @@ static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
+ int32_t min_value, max_value;
+ bool feature_enabled;
- return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_FAN_CURVE_BIT:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ if (feature_enabled) {
+ smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value, &max_value);
+ if (!min_value && !max_value) {
+ feature_enabled = false;
+ goto out;
+ }
+ }
+ break;
+ default:
+ feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
+ break;
+ }
+
+out:
+ return feature_enabled;
}
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
index 7f59a0aabdeb..fa083ad46c0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o
+SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o smu_v15_0_8_ppt.o
AMD_SWSMU_SMU15MGR = $(addprefix $(AMD_SWSMU_PATH)/smu15/,$(SMU15_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
index 09bb3d91a001..c3cb36813806 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
@@ -589,71 +589,52 @@ int smu_v15_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
- int ret = 0;
- uint64_t address;
- uint32_t address_low, address_high;
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_DramLogSetDramAddr,
+ .num_args = 3,
+ .num_out_args = 0,
+ };
if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
- return ret;
+ return 0;
- address = memory_pool->mc_address;
- address_high = (uint32_t)upper_32_bits(address);
- address_low = (uint32_t)lower_32_bits(address);
+ /* SMU_MSG_DramLogSetDramAddr: ARG0=low, ARG1=high, ARG2=size */
+ args.args[0] = lower_32_bits(memory_pool->mc_address);
+ args.args[1] = upper_32_bits(memory_pool->mc_address);
+ args.args[2] = (u32)memory_pool->size;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high, NULL);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low, NULL);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size, NULL);
- if (ret)
- return ret;
-
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
- int ret = 0;
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_SetDriverDramAddr,
+ .num_args = 2,
+ .num_out_args = 0,
+ };
- if (driver_table->mc_address) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address),
- NULL);
- if (!ret)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address),
- NULL);
- }
+ args.args[0] = lower_32_bits(driver_table->mc_address);
+ args.args[1] = upper_32_bits(driver_table->mc_address);
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_tool_table_location(struct smu_context *smu)
{
- int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_SetToolsDramAddr,
+ .num_args = 2,
+ .num_out_args = 0,
+ };
- if (tool_table->mc_address) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address),
- NULL);
- if (!ret)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address),
- NULL);
- }
+ /* SMU_MSG_SetToolsDramAddr: ARG0=low, ARG1=high */
+ args.args[0] = lower_32_bits(tool_table->mc_address);
+ args.args[1] = upper_32_bits(tool_table->mc_address);
- return ret;
+ return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_allowed_mask(struct smu_context *smu)
@@ -700,8 +681,7 @@ int smu_v15_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-int smu_v15_0_system_features_control(struct smu_context *smu,
- bool en)
+int smu_v15_0_system_features_control(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
@@ -905,7 +885,8 @@ static int smu_v15_0_wait_for_reset_complete(struct smu_context *smu,
return ret;
}
-int smu_v15_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+int smu_v15_0_wait_for_event(struct smu_context *smu,
+ enum smu_event_type event,
uint64_t event_arg)
{
int ret = -EINVAL;
@@ -1077,18 +1058,12 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
{
struct smu_15_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_15_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_15_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_15_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_15_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_15_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_15_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
@@ -1103,34 +1078,34 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- sclk_min = sclk_max = gfx_table->max;
- mclk_min = mclk_max = mem_table->max;
- socclk_min = socclk_max = soc_table->max;
- vclk_min = vclk_max = vclk_table->max;
- dclk_min = dclk_max = dclk_table->max;
- fclk_min = fclk_max = fclk_table->max;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- sclk_min = sclk_max = gfx_table->min;
- mclk_min = mclk_max = mem_table->min;
- socclk_min = socclk_max = soc_table->min;
- vclk_min = vclk_max = vclk_table->min;
- dclk_min = dclk_max = dclk_table->min;
- fclk_min = fclk_max = fclk_table->min;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MIN(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MIN(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MIN(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MIN(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MIN(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MIN(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- sclk_min = gfx_table->min;
- sclk_max = gfx_table->max;
- mclk_min = mem_table->min;
- mclk_max = mem_table->max;
- socclk_min = soc_table->min;
- socclk_max = soc_table->max;
- vclk_min = vclk_table->min;
- vclk_max = vclk_table->max;
- dclk_min = dclk_table->min;
- dclk_max = dclk_table->max;
- fclk_min = fclk_table->min;
- fclk_max = fclk_table->max;
+ sclk_min = SMU_DPM_TABLE_MIN(gfx_table);
+ sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = SMU_DPM_TABLE_MIN(mem_table);
+ mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = SMU_DPM_TABLE_MIN(soc_table);
+ socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = SMU_DPM_TABLE_MIN(vclk_table);
+ vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = SMU_DPM_TABLE_MIN(dclk_table);
+ dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = SMU_DPM_TABLE_MIN(fclk_table);
+ fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1352,10 +1327,11 @@ static int smu_v15_0_get_fine_grained_status(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_15_0_dpm_table *single_dpm_table)
+ struct smu_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
+ bool is_fine_grained;
int i;
ret = smu_v15_0_get_dpm_level_count(smu,
@@ -1368,12 +1344,15 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
ret = smu_v15_0_get_fine_grained_status(smu,
clk_type,
- &single_dpm_table->is_fine_grained);
+ &is_fine_grained);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
return ret;
}
+ if (is_fine_grained)
+ single_dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED;
+
for (i = 0; i < single_dpm_table->count; i++) {
ret = smu_v15_0_get_dpm_freq_by_index(smu,
clk_type,
@@ -1386,11 +1365,6 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
-
- if (i == 0)
- single_dpm_table->min = clk;
- else if (i == single_dpm_table->count - 1)
- single_dpm_table->max = clk;
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
new file mode 100644
index 000000000000..cc2babc6a341
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
@@ -0,0 +1,2270 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v15_0_8_pmfw.h"
+#include "smu15_driver_if_v15_0_8.h"
+#include "smu_v15_0_8_ppsmc.h"
+#include "smu_v15_0_8_ppt.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+#include "mp/mp_15_0_8_offset.h"
+#include "mp/mp_15_0_8_sh_mask.h"
+#include "smu_v15_0.h"
+#include "amdgpu_fru_eeprom.h"
+
+#undef MP1_Public
+
+/* address block */
+#define MP1_Public 0x03b00000
+#define smnMP1_FIRMWARE_FLAGS_15_0_8 0x3010024
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
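+/* e.g. 0x5A00 in Q10 is 22.5; SMUQ10_ROUND() yields 23 (frac 0x200 rounds up) */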
+
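+/*
+ * The UMC active mask carries four channel bits per HBM stack; a stack
+ * counts as present only when its whole nibble is set, and
+ * for_each_hbm_stack() walks the mask one nibble per iteration.
+ */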
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0xF) == 0xF)
+
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+	for ((stack_idx) = 0; (umc_mask); \
+	     (umc_mask) >>= 4, (stack_idx)++)
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_15_0_8_FEA_MAP(smu_feature, smu_15_0_8_feature) \
+ [smu_feature] = { 1, (smu_15_0_8_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+static const struct smu_feature_bits smu_v15_0_8_dpm_features = {
+ .bits = { SMU_FEATURE_BIT_INIT(FEATURE_ID_DATA_CALCULATION),
+ SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_GFXCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_UCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_FCLK),
+ SMU_FEATURE_BIT_INIT(FEATURE_ID_DPM_GL2CLK) }
+};
+
+static const struct cmn2asic_msg_mapping smu_v15_0_8_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
+ MSG_MAP(SetDriverDramAddr, PPSMC_MSG_SetDriverDramAddr, 1),
+ MSG_MAP(SetToolsDramAddr, PPSMC_MSG_SetToolsDramAddr, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0),
+ MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0),
+ MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0),
+ MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0),
+ MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0),
+ MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsVersion, PPSMC_MSG_GetSystemMetricsVersion, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 0),
+ MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 0),
+ MSG_MAP(SetSoftMinGl2clk, PPSMC_MSG_SetSoftMinGl2clk, 0),
+ MSG_MAP(SetSoftMaxGl2clk, PPSMC_MSG_SetSoftMaxGl2clk, 0),
+ MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
+ MSG_MAP(SetSoftMaxFclk, PPSMC_MSG_SetSoftMaxFclk, 0),
+};
+
+/* TODO: Update the clk map once enum PPCLK is updated in smu15_driver_if_v15_0_8.h */
+static struct cmn2asic_mapping smu_v15_0_8_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(UCLK, PPCLK_UCLK),
+};
+
+static const struct cmn2asic_mapping smu_v15_0_8_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_ID_DATA_CALCULATION),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_ID_DPM_GFXCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_ID_DPM_UCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_ID_DPM_FCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DPM_GL2CLK_BIT, FEATURE_ID_DPM_GL2CLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_ID_DS_GFXCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_ID_DS_SOCCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_ID_DS_LCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_ID_DS_FCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_DMABECLK_BIT, FEATURE_ID_DS_DMABECLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPIFOECLK_BIT, FEATURE_ID_DS_MPIFOECLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPRASCLK_BIT, FEATURE_ID_DS_MPRASCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPNHTCLK_BIT, FEATURE_ID_DS_MPNHTCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_FIOCLK_BIT, FEATURE_ID_DS_FIOCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_DXIOCLK_BIT, FEATURE_ID_DS_DXIOCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_GL2CLK_BIT, FEATURE_ID_DS_GL2CLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_ID_PPT),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_ID_TDC),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_ID_SMU_CG),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_ID_FW_CTF),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_ID_THERMAL),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_ID_SOC_PCC),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_ID_XGMI_PER_LINK_PWR_DOWN),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_ID_DS_VCN),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_ID_DS_MP1CLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_ID_DS_MPIOCLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_ID_DS_MP0CLK),
+ SMU_15_0_8_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_ID_PIT),
+};
+
+#define TABLE_PMSTATUSLOG 0
+#define TABLE_SMU_METRICS 1
+#define TABLE_I2C_COMMANDS 2
+#define TABLE_COUNT 3
+
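+/* TAB_MAP() pairs each SMU_TABLE_<x> entry with the TABLE_<x> index above */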
+static const struct cmn2asic_mapping smu_v15_0_8_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(I2C_COMMANDS),
+};
+
+static size_t smu_v15_0_8_get_system_metrics_size(void)
+{
+ return sizeof(SystemMetricsTable_t);
+}
+
+static int smu_v15_0_8_tables_init(struct smu_context *smu)
+{
+ struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics;
+ struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+	int ret, gpu_metrics_size = sizeof(MetricsTable_t);
+ struct smu_table *tables = smu_table->tables;
+ struct smu_v15_0_8_gpu_metrics *gpu_metrics;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU15_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+		       gpu_metrics_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v15_0_8_get_system_metrics_size(), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+	metrics_table = kzalloc(gpu_metrics_size, GFP_KERNEL);
+ if (!metrics_table)
+ return -ENOMEM;
+
+ smu_table->metrics_time = 0;
+
+ driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
+ return -ENOMEM;
+
+ ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPU_METRICS,
+ sizeof(struct smu_v15_0_8_gpu_metrics),
+ SMU_GPU_METRICS_CACHE_INTERVAL);
+ if (ret)
+ return ret;
+
+ gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+ SMU_DRIVER_TABLE_GPU_METRICS);
+ smu_v15_0_8_gpu_metrics_init(gpu_metrics, 1, 9);
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v15_0_8_get_system_metrics_size(), 5);
+
+ if (ret)
+ return ret;
+
+ /* Initialize base board temperature metrics */
+ ret = smu_driver_table_init(smu,
+ SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS,
+ sizeof(*baseboard_temp_metrics), 50);
+	if (ret) {
+		smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+		return ret;
+	}
+ baseboard_temp_metrics = (struct smu_v15_0_8_baseboard_temp_metrics *)
+ smu_driver_table_ptr(smu,
+ SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_v15_0_8_baseboard_temp_metrics_init(baseboard_temp_metrics, 1, 1);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ smu_driver_table_fini(smu,
+ SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ gpuboard_temp_metrics = (struct smu_v15_0_8_gpuboard_temp_metrics *)
+ smu_driver_table_ptr(smu,
+ SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_v15_0_8_gpuboard_temp_metrics_init(gpuboard_temp_metrics, 1, 1);
+
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
+ mutex_init(&smu_table->metrics_lock);
+
+ return 0;
+}
+
+static int smu_v15_0_8_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context =
+ kzalloc(sizeof(struct smu_15_0_dpm_context), GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+ smu_dpm->dpm_context_size = sizeof(struct smu_15_0_dpm_context);
+
+ smu_dpm->dpm_policies =
+ kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL);
+ if (!smu_dpm->dpm_policies) {
+ kfree(smu_dpm->dpm_context);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v15_0_8_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v15_0_8_allocate_dpm_context(smu);
+
+ return ret;
+}
+
+static int smu_v15_0_8_tables_fini(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ smu_driver_table_fini(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_driver_table_fini(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ mutex_destroy(&smu_table->metrics_lock);
+
+ return 0;
+}
+
+static int smu_v15_0_8_fini_smc_tables(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_v15_0_8_tables_fini(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v15_0_fini_smc_tables(smu);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v15_0_8_init_allowed_features(struct smu_context *smu)
+{
+ /* pptable will handle the features to enable */
+ smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
+
+ return 0;
+}
+
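+/*
+ * Fetch the SMU metrics table, reusing the cached copy when it is younger
+ * than @tmo milliseconds; tmo == 0 forces a refresh. Pass data == NULL to
+ * only refresh the cache.
+ */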
+static int smu_v15_0_8_get_metrics_table_internal(struct smu_context *smu, uint32_t tmo, void *data)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ struct amdgpu_device *adev = smu->adev;
+
+ mutex_lock(&smu_table->metrics_lock);
+
+ if (!tmo || !smu_table->metrics_time ||
+ time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(tmo))) {
+ int ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
+ if (ret) {
+ dev_info(adev->dev,
+ "Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu_table->metrics_lock);
+ return ret;
+ }
+
+ amdgpu_device_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ smu_table->metrics_time = jiffies;
+ }
+
+ if (data)
+ memcpy(data, smu_table->metrics_table, table_size);
+ mutex_unlock(&smu_table->metrics_lock);
+ return 0;
+}
+
+static int smu_v15_0_8_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret, xcc_id;
+
+ ret = smu_v15_0_8_get_metrics_table_internal(smu, 10, NULL);
+ if (ret)
+ return ret;
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ xcc_id = GET_INST(GC, 0);
+ *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_ROUND(metrics->UclkFrequency[0]);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_ROUND(metrics->FclkFrequency[0]);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_CURR_SOCKETPOWER:
+ *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ {
+ u32 max_hbm_temp = 0;
+
+ /* Find max temperature across all HBM stacks */
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int stack_idx;
+
+ for_each_hbm_stack(stack_idx, mask) {
+ u32 temp;
+
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+
+ temp = SMUQ10_ROUND(metrics->HbmTemperature[stack_idx]);
+ if (temp > max_hbm_temp)
+ max_hbm_temp = temp;
+ }
+ }
+ *value = max_hbm_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ }
+ /* This is the max of all VRs and not just SOC VR.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ case SMU_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v15_0_8_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v15_0_8_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v15_0_8_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXACTIVITY, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v15_0_8_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_MEMACTIVITY, value);
+ break;
+ default:
+ dev_err(smu->adev->dev,
+ "Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v15_0_8_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v15_0_8_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v15_0_8_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_MEM, value);
+ break;
+ default:
+ dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
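+/* Refresh the cached system metrics table from the PMFW once it expires */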
+static int smu_v15_0_8_get_system_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table *sys_table;
+ int ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ if (smu_table_cache_is_valid(sys_table))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export system metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_hdp_invalidate(smu->adev, NULL);
+ smu_table_cache_update_time(sys_table, jiffies);
+ memcpy(sys_table->cache.buffer, table->cpu_addr,
+ sizeof(SystemMetricsTable_t));
+
+ return 0;
+}
+
+static int smu_v15_0_8_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+		/* TBD: limit not available yet, report 0 for now */
+ *value = 0;
+ return 0;
+ }
+
+ ret = smu_v15_0_8_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ *value = SMUQ10_ROUND(metrics->NodePowerLimit);
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ *value = SMUQ10_ROUND(metrics->NodePower);
+ break;
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor, void *data,
+ uint32_t *size)
+{
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ int ret = 0;
+
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v15_0_8_get_current_activity_percent(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = smu_v15_0_8_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v15_0_8_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v15_0_8_get_current_clk_freq_by_table(smu,
+ SMU_UCLK, (uint32_t *)data);
+		/* the output clock frequency is in 10 KHz units */
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v15_0_8_get_current_clk_freq_by_table(smu,
+ SMU_GFXCLK, (uint32_t *)data);
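+		/* likewise reported in 10 KHz units */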
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDBOARD:
+ *(uint32_t *)data = dpm_context->board_volt;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+ ret = smu_v15_0_8_get_npm_data(smu, sensor, (uint32_t *)data);
+ if (ret)
+ return ret;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v15_0_8_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf,
+ int *offset)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct smu_15_0_dpm_context *dpm_context;
+ struct smu_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	int ret, size = *offset;
+	uint32_t now;
+
+ if (amdgpu_ras_intr_triggered()) {
+ sysfs_emit_at(buf, size, "unavailable\n");
+ return -EBUSY;
+ }
+
+ dpm_context = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ break;
+ case SMU_OD_MCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ break;
+ case SMU_SCLK:
+ case SMU_GFXCLK:
+ single_dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ single_dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ single_dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ single_dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ break;
+ }
+
+ if (single_dpm_table) {
+ ret = smu_v15_0_8_get_current_clk_freq_by_table(smu, type, &now);
+ if (ret) {
+			dev_err(smu->adev->dev,
+				"Failed to get current clk!\n");
+ return ret;
+ }
+ ret = smu_cmn_print_dpm_clk_levels(smu, single_dpm_table, now,
+ buf, offset);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ }
+
+ *offset = size;
+
+ return 0;
+}
+
+static int smu_v15_0_8_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ struct smu_dpm_table *dpm_table;
+ uint32_t min_clk = 0, max_clk = 0;
+
+ if (!pptable->init)
+ return -EINVAL;
+
+ /* Try cached DPM tables first */
+ if (dpm_context) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_GL2CLK:
+ dpm_table = &dpm_context->dpm_tables.gl2_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dpm_table = NULL;
+ break;
+ }
+
+ if (dpm_table && dpm_table->count > 0) {
+ min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(dpm_table);
+
+ if (min_clk && max_clk) {
+ if (min)
+ *min = min_clk;
+ if (max)
+ *max = max_clk;
+ return 0;
+ }
+ }
+ }
+
+ /* Fall back to pptable */
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ min_clk = pptable->MinGfxclkFrequency;
+ max_clk = pptable->MaxGfxclkFrequency;
+ break;
+ case SMU_FCLK:
+ min_clk = pptable->MinFclkFrequency;
+ max_clk = pptable->MaxFclkFrequency;
+ break;
+ case SMU_GL2CLK:
+ min_clk = pptable->MinGl2clkFrequency;
+ max_clk = pptable->MaxGl2clkFrequency;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_clk = pptable->UclkFrequencyTable[0];
+ max_clk = pptable->UclkFrequencyTable[ARRAY_SIZE(pptable->UclkFrequencyTable) - 1];
+ break;
+ case SMU_SOCCLK:
+ min_clk = pptable->SocclkFrequency;
+ max_clk = pptable->SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ min_clk = pptable->VclkFrequency;
+ max_clk = pptable->VclkFrequency;
+ break;
+ case SMU_DCLK:
+ min_clk = pptable->DclkFrequency;
+ max_clk = pptable->DclkFrequency;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = min_clk;
+ if (max)
+ *max = max_clk;
+
+ return 0;
+}
+
+static int smu_v15_0_8_set_dpm_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_dpm_table *dpm_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int i, ret;
+ uint32_t gfxclkmin, gfxclkmax;
+
+ /* gfxclk dpm table setup - fine-grained */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
+ dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ ret = smu_v15_0_8_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
+ &gfxclkmin, &gfxclkmax);
+ if (ret)
+ return ret;
+
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = gfxclkmin;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = gfxclkmax;
+ dpm_table->dpm_levels[1].enabled = true;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ }
+
+ /* fclk dpm table setup - fine-grained */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
+ dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = pptable->MinFclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = pptable->MaxFclkFrequency;
+ dpm_table->dpm_levels[1].enabled = true;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinFclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ }
+
+ /* gl2clk dpm table setup - fine-grained */
+	dpm_table = &dpm_context->dpm_tables.gl2_table;
+	dpm_table->clk_type = SMU_GL2CLK;
+	dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GL2CLK_BIT)) {
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = pptable->MinGl2clkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = pptable->MaxGl2clkFrequency;
+ dpm_table->dpm_levels[1].enabled = true;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinGl2clkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ }
+
+ /* uclk dpm table setup - discrete levels */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
+ dpm_table->flags = 0;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ dpm_table->count = ARRAY_SIZE(pptable->UclkFrequencyTable);
+ for (i = 0; i < dpm_table->count; ++i) {
+ dpm_table->dpm_levels[i].value = pptable->UclkFrequencyTable[i];
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->UclkFrequencyTable[0];
+ dpm_table->dpm_levels[0].enabled = true;
+ }
+
+ /* socclk dpm table setup - single boot-time value */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
+ dpm_table->flags = 0;
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->SocclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+
+ /* vclk dpm table setup - single boot-time value */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ dpm_table->clk_type = SMU_VCLK;
+ dpm_table->flags = 0;
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->VclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+
+ /* dclk dpm table setup - single boot-time value */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ dpm_table->clk_type = SMU_DCLK;
+ dpm_table->flags = 0;
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->DclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+
+ return 0;
+}
+
+static int smu_v15_0_8_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ /* TODO: PPTable is not available.
+ * 1) Find an alternate way to get 'PPTable values' here.
+ * 2) Check if there is SW CTF
+ */
+ table_context->thermal_controller_type = 0;
+
+ return 0;
+}
+
+static int smu_v15_0_8_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_15_0_8 & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v15_0_8_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_hdp_invalidate(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
+static int smu_v15_0_8_fru_get_product_info(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct amdgpu_fru_info *fru_info;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->fru_info) {
+ adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+ if (!adev->fru_info)
+ return -ENOMEM;
+ }
+
+ fru_info = adev->fru_info;
+ strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber,
+ sizeof(fru_info->product_number));
+ strscpy(fru_info->product_name, static_metrics->ProductInfo.Name,
+ sizeof(fru_info->product_name));
+ strscpy(fru_info->serial, static_metrics->ProductInfo.Serial,
+ sizeof(fru_info->serial));
+ strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName,
+ sizeof(fru_info->manufacturer_name));
+ strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId,
+ sizeof(fru_info->fru_id));
+
+ return 0;
+}
+
+static void smu_v15_0_8_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ uint16_t max_speed;
+ uint8_t max_width;
+
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
+}
+
+static int smu_v15_0_8_set_driver_pptable(struct smu_context *smu)
+{
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret, i, n;
+ uint32_t table_version;
+
+ if (!pptable->init) {
+ ret = smu_v15_0_8_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGfxclkFrequency);
+ pptable->MaxFclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxFclkFrequency);
+ pptable->MinFclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinFclkFrequency);
+ pptable->MaxGl2clkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGl2clkFrequency);
+ pptable->MinGl2clkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGl2clkFrequency);
+
+ for (i = 0; i < ARRAY_SIZE(static_metrics->UclkFrequencyTable); ++i)
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]);
+
+ pptable->SocclkFrequency = SMUQ10_ROUND(static_metrics->SocclkFrequency);
+ pptable->LclkFrequency = SMUQ10_ROUND(static_metrics->LclkFrequency);
+ pptable->VclkFrequency = SMUQ10_ROUND(static_metrics->VclkFrequency);
+ pptable->DclkFrequency = SMUQ10_ROUND(static_metrics->DclkFrequency);
+
+ pptable->CTFLimitMID = SMUQ10_ROUND(static_metrics->CTFLimit_MID);
+ pptable->CTFLimitAID = SMUQ10_ROUND(static_metrics->CTFLimit_AID);
+ pptable->CTFLimitXCD = SMUQ10_ROUND(static_metrics->CTFLimit_XCD);
+ pptable->CTFLimitHBM = SMUQ10_ROUND(static_metrics->CTFLimit_HBM);
+ pptable->ThermalLimitMID = SMUQ10_ROUND(static_metrics->ThermalLimit_MID);
+ pptable->ThermalLimitAID = SMUQ10_ROUND(static_metrics->ThermalLimit_AID);
+ pptable->ThermalLimitXCD = SMUQ10_ROUND(static_metrics->ThermalLimit_XCD);
+ pptable->ThermalLimitHBM = SMUQ10_ROUND(static_metrics->ThermalLimit_HBM);
+
+ /* use MID0 serial number by default */
+ pptable->PublicSerialNumberMID =
+ static_metrics->PublicSerialNumber_MID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumberMID);
+ pptable->PublicSerialNumberAID =
+ static_metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumberXCD =
+ static_metrics->PublicSerialNumber_XCD[0];
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_MID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_MID, i,
+ static_metrics->PublicSerialNumber_MID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
+ ret = smu_v15_0_8_fru_get_product_info(smu, static_metrics);
+ if (ret)
+ return ret;
+ pptable->PPT1Max = static_metrics->PPT1Max;
+ pptable->PPT1Min = static_metrics->PPT1Min;
+ pptable->PPT1Default = static_metrics->PPT1Default;
+
+ if (static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ smu_v15_0_8_init_xgmi_data(smu, static_metrics);
+ pptable->init = true;
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_v15_0_8_set_driver_pptable(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v15_0_8_set_dpm_table(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int smu_v15_0_8_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_15_0_power_context *power_context = smu_power->power_context;
+ uint32_t client_id = entry->client_id;
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t src_id = entry->src_id;
+ uint32_t data;
+
+ if (client_id == SOC_V1_0_IH_CLIENTID_MP1) {
+ if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+ /*
+ * ctxid is used to distinguish different events for SMCToHost
+ * interrupt.
+ */
+ switch (ctxid) {
+ case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
+ /*
+ * Increment the throttle interrupt counter
+ */
+ atomic64_inc(&smu->throttle_int_counter);
+
+ if (!atomic_read(&adev->throttling_logging_enabled))
+ return 0;
+
+ /* This uses the new method which fixes the
+ * incorrect throttling status reporting
+ * through metrics table. For older FWs,
+ * it will be ignored.
+ */
+ if (__ratelimit(&adev->throttling_logging_rs)) {
+ atomic_set(
+ &power_context->throttle_status,
+ entry->src_data[1]);
+ schedule_work(&smu->throttling_logging_work);
+ }
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v15_0_8_irq_funcs = {
+ .set = smu_v15_0_8_set_irq_state,
+ .process = smu_v15_0_8_irq_process,
+};
+
+static int smu_v15_0_8_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v15_0_8_irq_funcs;
+
+ ret = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_MP1,
+ IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v15_0_8_notify_unload(struct smu_context *smu)
+{
+ if (amdgpu_in_reset(smu->adev))
+ return 0;
+
+	dev_dbg(smu->adev->dev, "Notify PMFW about driver unload\n");
+	/* Ignore the return value; just let the FW know the driver is going away */
+ smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
+ return 0;
+}
+
+
+static int smu_v15_0_8_system_features_control(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (enable)
+ ret = smu_v15_0_system_features_control(smu, enable);
+ else
+ smu_v15_0_8_notify_unload(smu);
+
+ return ret;
+}
+
+/**
+ * smu_v15_0_8_get_enabled_mask - Get enabled SMU features (128-bit)
+ * @smu: SMU context
+ * @feature_mask: feature mask structure
+ *
+ * SMU 15 can report up to 128 feature bits in a single message via out_args.
+ * For backward compatibility, this function consumes only the first 64 bits
+ * (two output dwords).
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int smu_v15_0_8_get_enabled_mask(struct smu_context *smu,
+ struct smu_feature_bits *feature_mask)
+{
+ struct smu_msg_args args = {
+ .msg = SMU_MSG_GetEnabledSmuFeatures,
+ .num_args = 0,
+ .num_out_args = 2,
+ };
+ int ret;
+
+ if (!feature_mask)
+ return -EINVAL;
+
+ ret = smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
+
+ if (ret)
+ return ret;
+
+ smu_feature_bits_from_arr32(feature_mask, args.out_args,
+ SMU_FEATURE_NUM_DEFAULT);
+
+ return 0;
+}
+
+static bool smu_v15_0_8_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_feature_bits feature_enabled;
+
+ ret = smu_v15_0_8_get_enabled_mask(smu, &feature_enabled);
+ if (ret)
+ return false;
+
+ return smu_feature_bits_test_mask(&feature_enabled,
+ smu_v15_0_8_dpm_features.bits);
+}
+
+static ssize_t smu_v15_0_8_get_pm_metrics(struct smu_context *smu,
+ void *metrics, size_t max_size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_pm_metrics *pm_metrics = (struct amdgpu_pm_metrics *)metrics;
+ uint32_t table_version = smu_table->tables[SMU_TABLE_SMU_METRICS].version;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ uint32_t pmfw_version;
+ int ret;
+
+ if (!pm_metrics || !max_size)
+ return -EINVAL;
+
+ if (max_size < (table_size + sizeof(pm_metrics->common_header)))
+ return -EOVERFLOW;
+
+ /* Don't use cached metrics data */
+ ret = smu_v15_0_8_get_metrics_table_internal(smu, 0, pm_metrics->data);
+ if (ret)
+ return ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+ memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header));
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
+ pm_metrics->common_header.pmfw_version = pmfw_version;
+ pm_metrics->common_header.pmmetrics_version = table_version;
+ pm_metrics->common_header.structure_size =
+ sizeof(pm_metrics->common_header) + table_size;
+
+ return pm_metrics->common_header.structure_size;
+}
+
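+/*
+ * Mode-2 reset: send the reset message without waiting for a response,
+ * give the PMFW time to go through the reset, then poll for the ACK.
+ */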
+static int smu_v15_0_8_mode2_reset(struct smu_context *smu)
+{
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+ int ret = 0;
+
+ mutex_lock(&ctl->lock);
+
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2);
+
+ if (ret)
+ goto out;
+
+ /* Reset takes a bit longer, wait for 200ms. */
+ msleep(200);
+
+ dev_dbg(adev->dev, "wait for reset ack\n");
+ do {
+ ret = smu_msg_wait_response(ctl, 0);
+ /* Wait a bit more time for getting ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret)
+ goto out;
+
+ } while (ret == -ETIME && timeout);
+
+out:
+ mutex_unlock(&ctl->lock);
+
+ if (ret)
+		dev_err(adev->dev, "failed to send mode2 reset, error code %d\n",
+			ret);
+
+ return ret;
+}
+
+static bool smu_v15_0_8_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+		return smu->adev->gmc.xgmi.physical_node_id == 0;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void smu_v15_0_8_fill_baseboard_temp_metrics(
+ struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics,
+ const SystemMetricsTable_t *metrics)
+{
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ baseboard_temp_metrics->system_temp_ubb_fpga =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA];
+ baseboard_temp_metrics->system_temp_ubb_front =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FRONT];
+ baseboard_temp_metrics->system_temp_ubb_back =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_BACK];
+ baseboard_temp_metrics->system_temp_ubb_oam7 =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_OAM7];
+ baseboard_temp_metrics->system_temp_ubb_ibc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_IBC];
+ baseboard_temp_metrics->system_temp_ubb_ufpga =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_UFPGA];
+ baseboard_temp_metrics->system_temp_ubb_oam1 =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_OAM1];
+ baseboard_temp_metrics->system_temp_oam_0_1_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_0_1_HSC];
+ baseboard_temp_metrics->system_temp_oam_2_3_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_2_3_HSC];
+ baseboard_temp_metrics->system_temp_oam_4_5_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_4_5_HSC];
+ baseboard_temp_metrics->system_temp_oam_6_7_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_6_7_HSC];
+ baseboard_temp_metrics->system_temp_ubb_fpga_0v72_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA_0V72_VR];
+ baseboard_temp_metrics->system_temp_ubb_fpga_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_UBB_FPGA_3V3_VR];
+ baseboard_temp_metrics->system_temp_retimer_0_1_2_3_1v2_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR];
+ baseboard_temp_metrics->system_temp_retimer_4_5_6_7_1v2_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR];
+ baseboard_temp_metrics->system_temp_retimer_0_1_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_0_1_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_4_5_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_4_5_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_2_3_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_2_3_0V9_VR];
+ baseboard_temp_metrics->system_temp_retimer_6_7_0v9_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_RETIMER_6_7_0V9_VR];
+ baseboard_temp_metrics->system_temp_oam_0_1_2_3_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR];
+ baseboard_temp_metrics->system_temp_oam_4_5_6_7_3v3_vr =
+ metrics->SystemTemperatures[SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR];
+ baseboard_temp_metrics->system_temp_ibc_hsc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_IBC_HSC];
+ baseboard_temp_metrics->system_temp_ibc =
+ metrics->SystemTemperatures[SYSTEM_TEMP_IBC];
+}
+
+static void smu_v15_0_8_fill_gpuboard_temp_metrics(
+ struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics,
+ const SystemMetricsTable_t *metrics)
+{
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ gpuboard_temp_metrics->node_temp_retimer =
+ metrics->NodeTemperatures[NODE_TEMP_RETIMER];
+ gpuboard_temp_metrics->node_temp_ibc =
+ metrics->NodeTemperatures[NODE_TEMP_IBC_TEMP];
+ gpuboard_temp_metrics->node_temp_ibc_2 =
+ metrics->NodeTemperatures[NODE_TEMP_IBC_2_TEMP];
+ gpuboard_temp_metrics->node_temp_vdd18_vr =
+ metrics->NodeTemperatures[NODE_TEMP_VDD18_VR_TEMP];
+ gpuboard_temp_metrics->node_temp_04_hbm_b_vr =
+ metrics->NodeTemperatures[NODE_TEMP_04_HBM_B_VR_TEMP];
+ gpuboard_temp_metrics->node_temp_04_hbm_d_vr =
+ metrics->NodeTemperatures[NODE_TEMP_04_HBM_D_VR_TEMP];
+
+ gpuboard_temp_metrics->vr_temp_vddcr_socio_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_SOCIO_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_socio_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_SOCIO_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_x0 =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_X0_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_x1 =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_X1_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_04_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_04_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_04_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_04_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_hbm_b =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_HBM_B_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_hbm_d =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_HBM_D_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_11_gta_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_11_GTA_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_11_gta_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_11_GTA_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075_gta_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_GTA_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075_gta_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_GTA_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddcr_075_ucie =
+ metrics->VrTemperatures[SVI_PLANE_VDDCR_075_UCIE_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieaa =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAA_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieam_a =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAM_A_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddio_065_ucieam_c =
+ metrics->VrTemperatures[SVI_PLANE_VDDIO_065_UCIEAM_C_TEMP];
+ gpuboard_temp_metrics->vr_temp_vddan_075 =
+ metrics->VrTemperatures[SVI_PLANE_VDDAN_075_TEMP];
+}
+
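+/* Fill the requested board temperature metrics and copy them to the caller */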
+static ssize_t smu_v15_0_8_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type,
+ void *table)
+{
+ struct smu_v15_0_8_baseboard_temp_metrics *baseboard_temp_metrics;
+ struct smu_v15_0_8_gpuboard_temp_metrics *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ ret = smu_v15_0_8_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (type) {
+ case SMU_TEMP_METRIC_GPUBOARD:
+ gpuboard_temp_metrics =
+ (struct smu_v15_0_8_gpuboard_temp_metrics *)
+ smu_driver_table_ptr(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_v15_0_8_fill_gpuboard_temp_metrics(gpuboard_temp_metrics,
+ metrics);
+ memcpy(table, gpuboard_temp_metrics, sizeof(*gpuboard_temp_metrics));
+ return sizeof(*gpuboard_temp_metrics);
+ case SMU_TEMP_METRIC_BASEBOARD:
+ baseboard_temp_metrics =
+ (struct smu_v15_0_8_baseboard_temp_metrics *)
+ smu_driver_table_ptr(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_v15_0_8_fill_baseboard_temp_metrics(baseboard_temp_metrics,
+ metrics);
+ memcpy(table, baseboard_temp_metrics, sizeof(*baseboard_temp_metrics));
+ return sizeof(*baseboard_temp_metrics);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t smu_v15_0_8_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_v15_0_8_gpu_metrics *gpu_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j, idx;
+ uint32_t aid_mask = adev->aid_mask;
+ uint32_t mid_mask = adev->aid_mask;
+ MetricsTable_t *metrics;
+
+ ret = smu_v15_0_8_get_metrics_table_internal(smu, 1, NULL);
+ if (ret)
+ return ret;
+
+ metrics = (MetricsTable_t *)smu_table->metrics_table;
+ gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+ SMU_DRIVER_TABLE_GPU_METRICS);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature);
+
+ /* Per-HBM stack temperatures */
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+		if (unlikely(hweight64(mask) / 4 > SMU_15_0_8_MAX_HBM_STACKS)) {
+			dev_warn(adev->dev, "Invalid umc mask 0x%llx\n", mask);
+		} else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ SMUQ10_ROUND(metrics->HbmTemperature[stack_idx]);
+ }
+ }
+ }
+
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc = SMUQ10_ROUND(metrics->MaxVrTemperature);
+ /* MID, AID, XCD temperatures */
+ idx = 0;
+ for_each_inst(i, mid_mask) {
+ gpu_metrics->temperature_mid[idx] = SMUQ10_ROUND(metrics->MidTemperature[i]);
+ idx++;
+ }
+
+ idx = 0;
+ for_each_inst(i, aid_mask) {
+ gpu_metrics->temperature_aid[idx] = SMUQ10_ROUND(metrics->AidTemperature[i]);
+ idx++;
+ }
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->temperature_xcd[i] = SMUQ10_ROUND(metrics->XcdTemperature[xcc_id]);
+ }
+ /* Power */
+ gpu_metrics->curr_socket_power = SMUQ10_ROUND(metrics->SocketPower);
+
+ gpu_metrics->average_gfx_activity = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ gpu_metrics->mem_max_bandwidth = SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0) {
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ }
+ }
+
+ /* Per-MID clocks */
+ idx = 0;
+ for_each_inst(i, mid_mask) {
+ gpu_metrics->current_socclk[idx] = SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ idx++;
+ }
+
+ /* Per-VCN clocks */
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] = SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ gpu_metrics->current_dclk0[i] = SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ }
+ }
+
+ /* Per-AID clocks */
+ idx = 0;
+ for_each_inst(i, aid_mask) {
+ gpu_metrics->current_uclk[idx] = SMUQ10_ROUND(metrics->UclkFrequency[i]);
+ idx++;
+ }
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+ gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+ gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+ gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+ gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+ gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[j] = ret;
+ }
+
+ gpu_metrics->xgmi_read_data_acc = SMUQ10_ROUND(metrics->XgmiReadBandwidthAcc);
+ gpu_metrics->xgmi_write_data_acc = SMUQ10_ROUND(metrics->XgmiWriteBandwidthAcc);
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_ppt_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_thm_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->gfx_low_utilization_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc[i] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+
+ gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
+ gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
+
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+ *table = gpu_metrics;
+
+ smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPU_METRICS);
+
+ return sizeof(*gpu_metrics);
+}
+
+static void smu_v15_0_8_get_unique_id(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+
+ adev->unique_id = pptable->PublicSerialNumberMID;
+}
+
+static int smu_v15_0_8_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ uint32_t power_limit = 0;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
+ if (ret) {
+		dev_err(smu->adev->dev, "Couldn't get PPT limit\n");
+ return -EINVAL;
+ }
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+
+	if (default_power_limit)
+		*default_power_limit = pptable->MaxSocketPowerLimit;
+
+ if (max_power_limit)
+ *max_power_limit = pptable->MaxSocketPowerLimit;
+
+ if (min_power_limit)
+ *min_power_limit = 0;
+
+ return 0;
+}
+
+static int smu_v15_0_8_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_15_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
+
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
+ return 0;
+}
+
+static int smu_v15_0_8_set_gfx_soft_freq_limited_range(struct smu_context *smu,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret;
+
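+	/* the message argument carries the clock value in its low 16 bits */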
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ max & 0xffff, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
+ min & 0xffff, NULL);
+
+ return ret;
+}
+
+static int smu_v15_0_8_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_15_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ /* Determinism not supported on SMU v15.0.8 */
+ ret = -EOPNOTSUPP;
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ /* Restore GFXCLK to default range */
+ if ((SMU_DPM_TABLE_MIN(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.min) ||
+ (SMU_DPM_TABLE_MAX(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.max)) {
+ ret = smu_v15_0_8_set_gfx_soft_freq_limited_range(
+ smu, SMU_DPM_TABLE_MIN(gfx_table),
+ SMU_DPM_TABLE_MAX(gfx_table));
+ if (ret)
+ goto out;
+
+ pstate_table->gfxclk_pstate.curr.min =
+ SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ }
+
+ /* Restore UCLK to default max */
+ if (SMU_DPM_TABLE_MAX(uclk_table) !=
+ pstate_table->uclk_pstate.curr.max) {
+ /* Min UCLK is not expected to be changed */
+ ret = smu_v15_0_set_soft_freq_limited_range(smu,
+ SMU_UCLK, 0,
+ SMU_DPM_TABLE_MAX(uclk_table),
+ false);
+ if (ret)
+ goto out;
+
+ pstate_table->uclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(uclk_table);
+ }
+
+ smu_cmn_reset_custom_level(smu);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static int smu_v15_0_8_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max,
+ bool automatic)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret = 0;
+
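+ /* Soft limits apply to GFXCLK/SCLK and UCLK only, and only in manual mode */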
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
+ clk_type != SMU_UCLK)
+ return -EINVAL;
+
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ if (min >= max) {
+ dev_err(smu->adev->dev,
+ "Minimum clk should be less than the maximum allowed clock\n");
+ return -EINVAL;
+ }
+
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK) {
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v15_0_8_set_gfx_soft_freq_limited_range(smu,
+ min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+
+ if (clk_type == SMU_UCLK) {
+ if (max == pstate_table->uclk_pstate.curr.max)
+ return 0;
+
+ ret = smu_v15_0_set_soft_freq_limited_range(smu,
+ SMU_UCLK,
+ 0, max,
+ false);
+ if (!ret)
+ pstate_table->uclk_pstate.curr.max = max;
+ }
+
+ return ret;
+}
+
+static int smu_v15_0_8_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct smu_15_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ uint32_t min_clk, max_clk;
+ int ret;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
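+ /*
+ * For the clock tables, input[0] selects the endpoint (0 = min,
+ * 1 = max) and input[1] is the clock in MHz.
+ */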
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+ if (input[0] == 0) {
+ if (input[1] < min_clk) {
+ dev_warn(smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1], min_clk);
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.min = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "UCLK_LIMITS setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min UCLK level is not supported\n");
+ return -EINVAL;
+ } else if (input[0] == 1) {
+ if (input[1] > max_clk) {
+ dev_warn(smu->adev->dev,
+ "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
+ pstate_table->uclk_pstate.custom.max =
+ pstate_table->uclk_pstate.curr.max;
+
+ return -EINVAL;
+ }
+
+ pstate_table->uclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ /* Use the default frequencies for manual mode */
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.uclk_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_UCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ smu_cmn_reset_custom_level(smu);
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!pstate_table->gfxclk_pstate.custom.min)
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+
+ if (!pstate_table->gfxclk_pstate.custom.max)
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+
+ min_clk = pstate_table->gfxclk_pstate.custom.min;
+ max_clk = pstate_table->gfxclk_pstate.custom.max;
+
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+
+ /* Commit UCLK custom range (only max supported) */
+ if (pstate_table->uclk_pstate.custom.max) {
+ min_clk = pstate_table->uclk_pstate.curr.min;
+ max_clk = pstate_table->uclk_pstate.custom.max;
+ ret = smu_v15_0_8_set_soft_freq_limited_range(smu,
+ SMU_UCLK,
+ min_clk, max_clk,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int smu_v15_0_8_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ uint32_t max_ctf, max_thm;
+
+ if (amdgpu_sriov_multi_vf_mode(smu->adev))
+ return 0;
+
+ if (!range)
+ return -EINVAL;
+
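+ /* pptable limits are in degrees Celsius; range is reported in millidegrees */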
+ /* CTF (Critical Temperature Fault) limits */
+ max_ctf = max3(pptable->CTFLimitMID, pptable->CTFLimitXCD,
+ pptable->CTFLimitAID);
+ range->hotspot_emergency_max = max_ctf * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ range->mem_emergency_max = pptable->CTFLimitHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Thermal throttling limits */
+ max_thm = max3(pptable->ThermalLimitMID, pptable->ThermalLimitXCD,
+ pptable->ThermalLimitAID);
+ range->hotspot_crit_max = max_thm * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ range->mem_crit_max = pptable->ThermalLimitHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int smu_v15_0_8_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret;
+
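+ /* Handle the fast (PPT1) limit here; defer other limit types to the common v15.0 handler */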
+ if (limit_type == SMU_FAST_PPT_LIMIT) {
+ if (!pptable->PPT1Max)
+ return -EOPNOTSUPP;
+
+ if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
+ dev_err(smu->adev->dev,
+ "New PPT1 limit (%d) should be between min %d and max %d\n",
+ limit, pptable->PPT1Min, pptable->PPT1Max);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
+ limit, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
+
+ return ret;
+ }
+
+ return smu_v15_0_set_power_limit(smu, limit_type, limit);
+}
+
+static int smu_v15_0_8_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ PPTable_t *pptable = (PPTable_t *)smu_table->driver_pptable;
+ int ret = 0;
+
+ if (!ppt_limit)
+ return -EINVAL;
+
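+ /* Only the fast (PPT1) limit is reported here; PPT1Max == 0 means the board has no PPT1 support */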
+ if (type == SMU_FAST_PPT_LIMIT) {
+ if (!pptable->PPT1Max)
+ return -EOPNOTSUPP;
+
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = pptable->PPT1Max;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit,
+ ppt_limit);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Get fast PPT limit failed!\n");
+ break;
+ case SMU_PPT_LIMIT_DEFAULT:
+ *ppt_limit = pptable->PPT1Default;
+ break;
+ case SMU_PPT_LIMIT_MIN:
+ *ppt_limit = pptable->PPT1Min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct pptable_funcs smu_v15_0_8_ppt_funcs = {
+ .init_allowed_features = smu_v15_0_8_init_allowed_features,
+ .set_default_dpm_table = smu_v15_0_8_set_default_dpm_table,
+ .is_dpm_running = smu_v15_0_8_is_dpm_running,
+ .init_smc_tables = smu_v15_0_8_init_smc_tables,
+ .fini_smc_tables = smu_v15_0_8_fini_smc_tables,
+ .init_power = smu_v15_0_init_power,
+ .fini_power = smu_v15_0_fini_power,
+ .check_fw_status = smu_v15_0_8_check_fw_status,
+ .check_fw_version = smu_cmn_check_fw_version,
+ .set_driver_table_location = smu_v15_0_set_driver_table_location,
+ .set_tool_table_location = smu_v15_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v15_0_notify_memory_pool_location,
+ .system_features_control = smu_v15_0_8_system_features_control,
+ .get_enabled_mask = smu_v15_0_8_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .register_irq_handler = smu_v15_0_8_register_irq_handler,
+ .setup_pptable = smu_v15_0_8_setup_pptable,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .wait_for_event = smu_v15_0_wait_for_event,
+ .get_pm_metrics = smu_v15_0_8_get_pm_metrics,
+ .mode2_reset = smu_v15_0_8_mode2_reset,
+ .get_dpm_ultimate_freq = smu_v15_0_8_get_dpm_ultimate_freq,
+ .get_gpu_metrics = smu_v15_0_8_get_gpu_metrics,
+ .get_unique_id = smu_v15_0_8_get_unique_id,
+ .get_power_limit = smu_v15_0_8_get_power_limit,
+ .set_power_limit = smu_v15_0_8_set_power_limit,
+ .get_ppt_limit = smu_v15_0_8_get_ppt_limit,
+ .emit_clk_levels = smu_v15_0_8_emit_clk_levels,
+ .read_sensor = smu_v15_0_8_read_sensor,
+ .populate_umd_state_clk = smu_v15_0_8_populate_umd_state_clk,
+ .set_performance_level = smu_v15_0_8_set_performance_level,
+ .od_edit_dpm_table = smu_v15_0_8_od_edit_dpm_table,
+ .get_thermal_temperature_range = smu_v15_0_8_get_thermal_temperature_range,
+};
+
+static void smu_v15_0_8_init_msg_ctl(struct smu_context *smu,
+ const struct cmn2asic_msg_mapping *message_map)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+
+ ctl->smu = smu;
+ mutex_init(&ctl->lock);
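+ /* SMU v1 message protocol over the MP1 C2PMSG mailbox registers */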
+ ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_40);
+ ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_41);
+ ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_42);
+ ctl->config.arg_regs[1] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_43);
+ ctl->config.arg_regs[2] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_44);
+ ctl->config.arg_regs[3] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_45);
+ ctl->config.num_arg_regs = 4;
+ ctl->ops = &smu_msg_v1_ops;
+ ctl->default_timeout = adev->usec_timeout * 20;
+ ctl->message_map = message_map;
+}
+
+static const struct smu_temp_funcs smu_v15_0_8_temp_funcs = {
+ .temp_metrics_is_supported = smu_v15_0_8_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v15_0_8_get_temp_metrics,
+};
+
+void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v15_0_8_ppt_funcs;
+ smu->clock_map = smu_v15_0_8_clk_map;
+ smu->feature_map = smu_v15_0_8_feature_mask_map;
+ smu->table_map = smu_v15_0_8_table_map;
+ smu_v15_0_8_init_msg_ctl(smu, smu_v15_0_8_message_map);
+ smu->smu_temp.temp_funcs = &smu_v15_0_8_temp_funcs;
+ smu->smc_driver_if_version = SMU15_DRIVER_IF_VERSION_SMU_V15_0_8;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
new file mode 100644
index 000000000000..398ce4482174
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_15_0_8_PPT_H__
+#define __SMU_15_0_8_PPT_H__
+
+#define SMU_15_0_8_NUM_XGMI_LINKS 8
+#define SMU_15_0_8_MAX_GFX_CLKS 8
+#define SMU_15_0_8_MAX_CLKS 4
+#define SMU_15_0_8_MAX_XCC 8
+#define SMU_15_0_8_MAX_VCN 4
+#define SMU_15_0_8_MAX_JPEG 40
+#define SMU_15_0_8_MAX_AID 2
+#define SMU_15_0_8_MAX_MID 2
+#define SMU_15_0_8_MAX_HBM_STACKS 12
+extern void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu);
+
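+/* Driver-side view of the static firmware pptable: board limits, clock ranges and serial numbers */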
+typedef struct {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t MaxFclkFrequency;
+ uint32_t MinFclkFrequency;
+ uint32_t MaxGl2clkFrequency;
+ uint32_t MinGl2clkFrequency;
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t CTFLimitMID;
+ uint32_t CTFLimitAID;
+ uint32_t CTFLimitXCD;
+ uint32_t CTFLimitHBM;
+ uint32_t ThermalLimitMID;
+ uint32_t ThermalLimitAID;
+ uint32_t ThermalLimitXCD;
+ uint32_t ThermalLimitHBM;
+ uint64_t PublicSerialNumberMID;
+ uint64_t PublicSerialNumberAID;
+ uint64_t PublicSerialNumberXCD;
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+ bool init;
+} PPTable_t;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v15.0.8 GPU metrics */
+#define SMU_15_0_8_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hotspot); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mem); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_vrsoc); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_15_0_8_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_MID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mid, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_15_0_8_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_xcd, SMU_15_0_8_MAX_XCC); \
+ SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
+ SMU_MTYPE(U16), curr_socket_power); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_gfx_activity); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_umc_activity); \
+ SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), mem_max_bandwidth); \
+ SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), energy_accumulator); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
+ SMU_MTYPE(U64), system_clock_counter); \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), prochot_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), ppt_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), socket_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), vr_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), hbm_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), gfxclk_lock_status); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), pcie_link_width); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
+ SMU_MTYPE(U16), pcie_link_speed); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_width); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
+ SMU_MTYPE(U16), xgmi_link_speed); \
+ SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), mem_activity_acc); \
+ SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_bandwidth_acc, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U32), pcie_bandwidth_inst, SMU_15_0_8_MAX_MID); \
+ SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_nak_sent_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_nak_rcvd_count_acc); \
+ SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_status, \
+ SMU_15_0_8_NUM_XGMI_LINKS); \
+ SMU_SCALAR(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_read_data_acc); \
+ SMU_SCALAR(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_write_data_acc); \
+ SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
+ SMU_MTYPE(U64), firmware_timestamp); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_15_0_8_MAX_GFX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_15_0_8_MAX_MID); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk, SMU_15_0_8_MAX_AID); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
+ SMU_MUNIT(NONE), SMU_MTYPE(U64), \
+ pcie_lc_perf_other_end_recovery); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_15_0_8_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_15_0_8_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(NONE), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_15_0_8_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_15_0_8_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpu_metrics, SMU_15_0_8_METRICS_FIELDS);
+
+/* Maximum temperature sensor counts for system metrics */
+#define SMU_15_0_8_MAX_SYSTEM_TEMP_ENTRIES 32
+#define SMU_15_0_8_MAX_NODE_TEMP_ENTRIES 12
+#define SMU_15_0_8_MAX_VR_TEMP_ENTRIES 22
+
+/* SMU v15.0.8 GPU board temperature metrics */
+#define SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), label_version); \
+ SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), node_id); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_RETIMER), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_retimer); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_ibc); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC_2), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_ibc_2); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_VDD18_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_vdd18_vr); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_B_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_04_hbm_b_vr); \
+ SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_D_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), node_temp_04_hbm_d_vr); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_socio_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_socio_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X0), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_x0); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X1), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_x1); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_04_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_04_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_B), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_b); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_D), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_d); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_11_gta_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_11_gta_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075_gta_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075_gta_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_UCIE), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddcr_075_ucie); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieaa); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_A), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_a); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_C), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_c); \
+ SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), vr_temp_vddan_075);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpuboard_temp_metrics,
+ SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS);
+
+/* SMU v15.0.8 Baseboard temperature metrics - ID-based approach */
+#define SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), label_version); \
+ SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), node_id); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FRONT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_front); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_BACK), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_back); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM7), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_oam7); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_ibc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_UFPGA), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_ufpga); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM1), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_oam1); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_0_1_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_2_3_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_2_3_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_4_5_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_6_7_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_6_7_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_0V72_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga_0v72_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ubb_fpga_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_0_1_2_3_1v2_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_4_5_6_7_1v2_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_0_1_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_4_5_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_2_3_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_2_3_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_6_7_0V9_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_retimer_6_7_0v9_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_0_1_2_3_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_oam_4_5_6_7_3v3_vr); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC_HSC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ibc_hsc); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(S16), system_temp_ibc);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_baseboard_temp_metrics,
+ SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS);
+#endif
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index a644579903f4..7bd8c435466a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1057,10 +1057,6 @@ int smu_cmn_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
- if_version != smu->smc_driver_if_version)
- dev_info(adev->dev, "SMU driver if version not matched\n");
-
return 0;
}
@@ -1305,6 +1301,16 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
}
}
+void smu_cmn_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+}
+
static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
/* Frequencies within 25 MHz are considered equal */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index e4d282d8bcae..b76e86df5da7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -204,6 +204,7 @@ int smu_cmn_print_pcie_levels(struct smu_context *smu,
struct smu_pcie_table *pcie_table,
uint32_t cur_gen, uint32_t cur_lane,
char *buf, int *offset);
+void smu_cmn_reset_custom_level(struct smu_context *smu);
int smu_cmn_dpm_pcie_gen_idx(int gen);
int smu_cmn_dpm_pcie_width_idx(int width);
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
index e889baec378b..49b3aa7489ff 100644
--- a/drivers/gpu/drm/amd/ras/rascore/ras_core.c
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -69,6 +69,9 @@ int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
int seconds_per_minute = 60;
int days, remaining_seconds;
+ if (!tm)
+ return -EINVAL;
+
days = div64_u64_rem(timestamp, seconds_per_day, &remainder);
/* remainder will always be less than seconds_per_day. */
remaining_seconds = remainder;
@@ -116,6 +119,9 @@ bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
{
uint32_t status = 0;
+ if (!ras_core)
+ return false;
+
if (ras_core->sys_fn &&
ras_core->sys_fn->check_gpu_status)
ras_core->sys_fn->check_gpu_status(ras_core, &status);
@@ -127,6 +133,9 @@ bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
{
uint32_t status = 0;
+ if (!ras_core)
+ return false;
+
if (ras_core->sys_fn &&
ras_core->sys_fn->check_gpu_status)
ras_core->sys_fn->check_gpu_status(ras_core, &status);
@@ -271,6 +280,9 @@ struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
struct ras_core_context *ras_core;
struct ras_core_config *config;
+ if (!init_config)
+ return NULL;
+
ras_core = kzalloc_obj(*ras_core);
if (!ras_core)
return NULL;
@@ -479,6 +491,9 @@ int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
{
+ if (!ras_core)
+ return 0;
+
if (ras_core->ras_nbio.ip_func &&
ras_core->ras_nbio.ip_func->get_memory_partition_mode)
return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
@@ -562,6 +577,8 @@ bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
{
+ if (!ras_core || !gpu_mem)
+ return -EINVAL;
if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
@@ -572,6 +589,8 @@ int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
{
+ if (!ras_core || !gpu_mem)
+ return -EINVAL;
if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
@@ -625,6 +644,9 @@ int ras_core_event_notify(struct ras_core_context *ras_core,
int ras_core_get_device_system_info(struct ras_core_context *ras_core,
struct device_system_info *dev_info)
{
+ if (!dev_info)
+ return -EINVAL;
+
if (ras_core && ras_core->sys_fn &&
ras_core->sys_fn->get_device_system_info)
return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
index 23118f41eb96..c7ae005ea705 100644
--- a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
@@ -222,7 +222,7 @@ int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core)
mutex_lock(&ras_umc->pending_ecc_lock);
list_for_each_entry_safe(ecc_node,
tmp, &ras_umc->pending_ecc_list, node){
- if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
+ if (!ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
list_del(&ecc_node->node);
kfree(ecc_node);
}