author     Maarten Lankhorst <dev@lankhorst.se>  2025-11-10 14:42:54 +0100
committer  Maarten Lankhorst <dev@lankhorst.se>  2025-11-10 14:42:54 +0100
commit     410d88782af805804ebe8cd8a275bd71f88bf399 (patch)
tree       16a9911c3d034d5945c048b1b1987985db70143f
parent     95eacb81d0d98775c9eb71dc13e6ef24110766b6 (diff)
parent     e237dfe70867f02de223e36340fe5f8b0fe0eada (diff)

Merge remote-tracking branch 'drm/drm-next' into drm-misc-next

Backmerge to prevent getting out of sync with drm-next too much.

Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c252
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c244
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c293
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c425
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c342
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c291
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c286
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c369
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2824
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h)11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h)7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c)84
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c)459
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h)36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c1408
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h104
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h1433
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h32
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h323
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c50
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c40
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c17
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c31
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c19
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h14
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h119
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c7
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c70
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c29
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c2
-rw-r--r--drivers/gpu/drm/amd/ras/Makefile34
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c285
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h54
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c181
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h27
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c611
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h78
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c94
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c125
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c126
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h37
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c273
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h110
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile44
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras.h368
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.c672
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.h164
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c379
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h71
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.c522
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.h426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core.c603
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.c310
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.h304
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c1339
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h197
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.c70
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.h43
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h259
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c310
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h93
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.c81
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.h50
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c105
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.c95
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.h46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c123
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.c315
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.h53
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.c750
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.h145
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h231
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.c706
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.h166
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c511
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h314
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c156
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c35
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c59
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c7
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c313
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf_regs.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c437
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c131
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.c295
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c319
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_jiffies.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h68
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c108
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c263
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c198
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c2000
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h44
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c428
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c498
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h5
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.c157
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.h46
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c234
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h17
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c167
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h12
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c341
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h3
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.c88
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.h38
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c104
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c7
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c35
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c4
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c149
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c107
-rw-r--r--drivers/gpu/drm/i915/i915_jiffies.h16
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.c18
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.h19
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c67
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h10
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h41
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c77
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c15
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/xe/Makefile8
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h105
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h4
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h9
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c27
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c3
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c66
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c12
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c61
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.h11
-rw-r--r--drivers/gpu/drm/xe/display/xe_panic.c50
-rw-r--r--drivers/gpu/drm/xe/display/xe_stolen.c123
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c16
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c29
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h1
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_device.c2
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c35
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c36
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c46
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c2
-rw-r--r--drivers/gpu/drm/xe/xe_map.h4
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c132
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c29
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c26
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c21
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c2
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c28
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c6
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c39
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c46
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c2
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h6
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c10
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c4
-rw-r--r--include/drm/display/drm_dp.h3
-rw-r--r--include/drm/display/drm_dp_helper.h14
-rw-r--r--include/drm/drm_crtc.h18
-rw-r--r--include/drm/intel/display_member.h42
-rw-r--r--include/drm/intel/display_parent_interface.h45
-rw-r--r--include/drm/intel/pciids.h9
657 files changed, 33399 insertions, 4792 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 64e7acff8f18..ebe08947c5a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
- -I$(FULL_AMD_PATH)/amdkfd
+ -I$(FULL_AMD_PATH)/amdkfd \
+ -I$(FULL_AMD_PATH)/ras/ras_mgr
# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
@@ -324,4 +325,9 @@ amdgpu-y += \
isp_v4_1_1.o
endif
+AMD_GPU_RAS_PATH := ../ras
+AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras
+include $(AMD_GPU_RAS_FULL_PATH)/Makefile
+amdgpu-y += $(AMD_GPU_RAS_FILES)
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 9569dc16dd3d..daa7b23bc775 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
uint32_t ip_block;
int r, i;
+ /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-aid */
+ if (adev->aid_mask)
+ ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6f5b4a0e0a34..50079209c472 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -372,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
-#define AMDGPU_MAX_IP_NUM 16
+#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM
struct amdgpu_ip_block_status {
bool valid;
@@ -839,8 +841,6 @@ struct amd_powerplay {
const struct amd_pm_funcs *pp_funcs;
};
-struct ip_discovery_top;
-
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -972,8 +972,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct debugfs_blob_wrapper debugfs_discovery_blob;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1063,6 +1062,9 @@ struct amdgpu_device {
u32 log2_max_MBps;
} mm_stats;
+ /* discovery*/
+ struct amdgpu_discovery_info discovery;
+
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
@@ -1174,6 +1176,12 @@ struct amdgpu_device {
* queue fence.
*/
struct xarray userq_xa;
+ /**
+ * @userq_doorbell_xa: Global user queue map (doorbell index → queue)
+ * Key: doorbell_index (unique global identifier for the queue)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_doorbell_xa;
/* df */
struct amdgpu_df df;
@@ -1265,8 +1273,6 @@ struct amdgpu_device {
struct list_head ras_list;
- struct ip_discovery_top *ip_top;
-
struct amdgpu_reset_domain *reset_domain;
struct mutex benchmark_mutex;
@@ -1309,8 +1315,6 @@ struct amdgpu_device {
*/
bool apu_prefer_gtt;
- struct list_head userq_mgr_list;
- struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
struct amdgpu_uid *uid_info;
@@ -1638,7 +1642,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 6c62e27b9800..d31460a9e958 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index a2879d2b7c8e..644f79f3c9af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -36,6 +36,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* Total memory size in system memory and all GPU VRAM. Used to
* estimate worst case amount of memory to reserve for page tables
@@ -746,6 +747,20 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset)
{
+
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = block;
+ ih_info.pasid = pasid;
+ ih_info.reset = reset;
+ ih_info.pasid_fn = pasid_fn;
+ ih_info.data = data;
+ amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
+ return;
+ }
+
amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 9e120c934cc1..8bdfcde2029b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -71,7 +71,7 @@ struct kgd_mem {
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a2ca9acf8c4e..96ccd5ade031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
@@ -1113,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
@@ -1916,7 +1923,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
@@ -1954,9 +1961,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
if (size) {
if (!is_imported &&
- (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- (adev->apu_prefer_gtt &&
- mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
*size = bo_size;
else
*size = 0;
@@ -2542,7 +2547,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
@@ -2566,9 +2571,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
}
+ mem->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ amdgpu_hmm_range_free(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
@@ -2741,8 +2751,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
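
Note: the amdkfd hunks above replace the hmm_range out-parameter with a caller-owned amdgpu_hmm_range that is explicitly allocated, validated, and freed. Below is a minimal sketch of that new lifecycle; only amdgpu_hmm_range_alloc()/free()/valid() and amdgpu_ttm_tt_get_user_pages() are taken from the hunks themselves, and the wrapper functions are illustrative, not actual driver code.

/* Illustrative only: caller-owned range lifecycle as used in the hunks above. */
static int example_pin_user_pages(struct amdgpu_bo *bo,
				  struct amdgpu_hmm_range **out)
{
	struct amdgpu_hmm_range *range;
	int ret;

	range = amdgpu_hmm_range_alloc(NULL);	/* caller now owns the range */
	if (unlikely(!range))
		return -ENOMEM;

	ret = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (ret) {
		amdgpu_hmm_range_free(range);	/* replaces amdgpu_ttm_tt_discard_user_pages() */
		return ret;
	}

	*out = range;
	return 0;
}

/* Later, validity is checked and the range is unconditionally freed. */
static bool example_check_and_release(struct amdgpu_hmm_range *range)
{
	bool valid = amdgpu_hmm_range_valid(range);	/* was amdgpu_ttm_tt_get_user_pages_done() */

	amdgpu_hmm_range_free(range);
	return valid;
}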
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index c7d32fb216e4..636385c80f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
- if (frev == 2 && crev == 1) {
- fw_usage_v2_1 =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_1(adev,
- fw_usage_v2_1,
- &usage_bytes);
- } else if (frev >= 2 && crev >= 2) {
- fw_usage_v2_2 =
- (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_2(adev,
- fw_usage_v2_2,
- &usage_bytes);
+ /* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */
+ if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 00e96419fcda..35d04e69aec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
*/
static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios = NULL;
resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
+ u32 size = 256U * 1024U; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
@@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap_wc(vram_base, size);
- if (!bios)
- return false;
adev->bios = kmalloc(size, GFP_KERNEL);
- if (!adev->bios) {
- iounmap(bios);
+ if (!adev->bios)
return false;
+
+ /* For SRIOV, when dynamic critical region is enabled,
+ * the vbios image is put at a dynamic offset of VRAM in the VF.
+ * If dynamic critical region is disabled, follow the existing logic as on bare metal.
+ */
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+ } else {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+
+ memcpy_fromio(adev->bios, bios, size);
+ iounmap(bios);
}
+
adev->bios_size = size;
- memcpy_fromio(adev->bios, bios, size);
- iounmap(bios);
if (!check_atom_bios(adev, size)) {
amdgpu_bios_release(adev);
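
Both the VBIOS path above and the IP discovery path later in this diff funnel through the same SR-IOV helper when the dynamic critical region is enabled; note that size is now an in/out u32 so the helper can report the actual image length that ends up in adev->bios_size. The prototype below is inferred from the call sites only and should be treated as an assumption:

    /* Assumed from the call sites in this patch, not a verified prototype:
     * copies the table identified by table_id (e.g. AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID
     * or AMD_SRIOV_MSG_IPD_TABLE_ID) into buf and updates *size with the copied length.
     */
    int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
                                          uint32_t table_id, void *buf, u32 *size);
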
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index a716c9886c74..2b5e7c46a39d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 47e9bfba0642..9f96d568acf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1146,10 +1142,8 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index ef996493115f..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2f6a96af7fb1..ecdfe6cb36cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -29,7 +29,6 @@
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
-#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -41,6 +40,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
@@ -891,12 +891,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ e->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!e->range))
+ return -ENOMEM;
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
if (r)
goto out_free_user_pages;
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -990,9 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1323,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a70651050acf..62d43b8cbe58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
@@ -179,7 +178,6 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
if (rd->id.use_grbm) {
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
mutex_unlock(&rd->lock);
@@ -310,7 +307,6 @@ end:
mutex_unlock(&rd->lock);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
@@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
@@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
up_write(&adev->reset_domain->sem);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
r = amdgpu_benchmark(adev, val);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -1902,7 +1878,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence.base) == fence)
+ if (preempted && (&job->hw_fence->base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
ret = -EINVAL;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
@@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
- adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
- adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
- debugfs_create_blob("amdgpu_discovery", 0444, root,
- &adev->debugfs_discovery_blob);
+ if (adev->discovery.debugfs_blob.size)
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->discovery.debugfs_blob);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index aa3736de238d..654f4844b7ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2387,7 +2387,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ * amdgpu_device_ip_is_hw - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
@@ -2395,6 +2395,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 * Check if the hardware IP is enabled or not.
 * Returns true if the IP is enabled, false if not.
*/
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].status.hw;
+ }
+ return false;
+}
+
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP valid
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is valid or not.
+ * Returns true if the IP is valid, false if not.
+ */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -2633,7 +2654,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
return 0;
chip_name = "navi12";
break;
@@ -2761,6 +2782,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
+
+ r = amdgpu_virt_init_critical_region(adev);
+ if (r)
+ return r;
}
switch (adev->asic_type) {
@@ -3780,7 +3805,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
if (r)
return r;
@@ -3863,9 +3887,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
- adev->ip_blocks[i].status.hw = false;
+ if (r)
+ return r;
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
@@ -3895,7 +3919,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
@@ -4191,7 +4215,6 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
#else
return false;
#endif
- case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
@@ -4285,58 +4308,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
long timeout;
int ret = 0;
- /*
- * By default timeout for jobs is 10 sec
- */
- adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ /* By default timeout for all queues is 2 sec */
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = msecs_to_jiffies(2000);
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
+ if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ return 0;
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- dev_warn(adev->dev, "lockup timeout disabled");
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
}
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1) {
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
}
}
+ /* When only one value is specified, apply it to all queues. */
+ if (index == 1)
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = timeout;
+
return ret;
}
@@ -4391,6 +4409,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
dev_info(adev->dev, "MCBP is enabled\n");
}
+static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
+
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
+
+ r = amdgpu_device_attr_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
+ if (r)
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
+
+ return r;
+}
+
+static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -4490,7 +4557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
- mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4518,7 +4584,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
- INIT_LIST_HEAD(&adev->userq_mgr_list);
+ xa_init(&adev->userq_doorbell_xa);
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
@@ -4814,39 +4880,14 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
}
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
/*
* Place those sysfs registering after `late_init`. As some of those
* operations performed in `late_init` might affect the sysfs
* interfaces creating.
*/
- r = amdgpu_atombios_sysfs_init(adev);
- if (r)
- drm_err(&adev->ddev,
- "registering atombios sysfs failed (%d).\n", r);
-
- r = amdgpu_pm_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
-
- r = amdgpu_ucode_sysfs_init(adev);
- if (r) {
- adev->ucode_sysfs_en = false;
- dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
- } else
- adev->ucode_sysfs_en = true;
-
- r = amdgpu_device_attr_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "Could not create amdgpu device attr\n");
-
- r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
- if (r)
- dev_err(adev->dev,
- "Could not create amdgpu board attributes\n");
-
- amdgpu_fru_sysfs_init(adev);
- amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_sysfs_init(adev);
+ r = amdgpu_device_sys_interface_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4874,9 +4915,6 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
- amdgpu_xgmi_reset_on_init(adev);
-
amdgpu_device_check_iommu_direct_map(adev);
adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
@@ -4968,15 +5006,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->pm.sysfs_initialized)
- amdgpu_pm_sysfs_fini(adev);
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- amdgpu_device_attr_sysfs_fini(adev);
- amdgpu_fru_sysfs_fini(adev);
-
- amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_sysfs_fini(adev);
+ amdgpu_device_sys_interface_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -5051,7 +5081,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
amdgpu_discovery_fini(adev);
amdgpu_reset_put_reset_domain(adev->reset_domain);
@@ -5225,10 +5255,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_ras_suspend(adev);
- amdgpu_device_ip_suspend_phase1(adev);
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
+ r = amdgpu_userq_suspend(adev);
+ if (r)
+ return r;
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5238,7 +5272,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_fence_driver_hw_fini(adev);
- amdgpu_device_ip_suspend_phase2(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ if (r)
+ return r;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
@@ -5809,11 +5845,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!amdgpu_ring_sched_ready(ring))
continue;
- /* Clear job fence from fence drv to avoid force_completion
- * leave NULL and vm flush fence in fence drv
- */
- amdgpu_fence_driver_clear_job_fences(ring);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -6542,7 +6573,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ if (job && dma_fence_is_signaled(&job->hw_fence->base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -7286,10 +7317,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- if (ring && ring->funcs->emit_hdp_flush)
+ if (ring && ring->funcs->emit_hdp_flush) {
amdgpu_ring_emit_hdp_flush(ring);
- else
- amdgpu_asic_flush_hdp(adev, ring);
+ return;
+ }
+
+ if (!ring && amdgpu_sriov_runtime(adev)) {
+ if (!amdgpu_kiq_hdp_flush(adev))
+ return;
+ }
+
+ amdgpu_asic_flush_hdp(adev, ring);
}
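
The reworked helper now falls through three levels: a ring-emitted flush when the caller supplies a ring that implements emit_hdp_flush, a KIQ-backed flush for SR-IOV in runtime mode when no ring is given, and the register-based ASIC flush as the last resort. A comment-style summary of the new ordering (amdgpu_kiq_hdp_flush() returning 0 is assumed to mean the KIQ flush succeeded, per the early return above):

    /*
     * amdgpu_device_flush_hdp() priority after this change:
     *   1) ring && ring->funcs->emit_hdp_flush -> amdgpu_ring_emit_hdp_flush(ring)
     *   2) !ring && amdgpu_sriov_runtime(adev) -> amdgpu_kiq_hdp_flush(adev), 0 == done
     *   3) otherwise, or if the KIQ flush fails -> amdgpu_asic_flush_hdp(adev, ring)
     */
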
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index dd7b2b796427..fa2a22dfa048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -107,6 +107,7 @@
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
if (discv_regn) {
- memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memcpy(binary, discv_regn, adev->discovery.size);
memunmap(discv_regn);
return 0;
}
@@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
else
vram_size <<= 20;
+ /*
+ * If in VRAM, the discovery TMR is marked for reservation. If it is in system memory,
+ * then it is not required to be reserved.
+ */
if (sz_valid) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ /* For SRIOV VFs with dynamic critical region enabled,
+ * we will get the IPD binary via the call below.
+ * If dynamic critical region is disabled, fall through to the normal sequence.
+ */
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+ &adev->discovery.size)) {
+ dev_err(adev->dev,
+ "failed to read discovery info from dynamic critical region.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery.size, false);
+ adev->discovery.reserve_tmr = true;
+ }
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
@@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
dev_err(adev->dev,
"failed to read discovery info from memory, vram size read: %llx",
vram_size);
-
+exit:
return ret;
}
@@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
struct binary_header *bhdr)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct table_info *info;
uint16_t checksum;
uint16_t offset;
@@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
checksum = le16_to_cpu(info->checksum);
struct nps_info_header *nhdr =
- (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+ (struct nps_info_header *)(discovery_bin + offset);
if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
return -EINVAL;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_dbg(adev->dev, "invalid nps info data table checksum\n");
@@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
- if (amdgpu_discovery == 2)
+ if (amdgpu_discovery == 2) {
+ /* Assume there is a valid discovery TMR in VRAM even if the binary is sideloaded */
+ adev->discovery.reserve_tmr = true;
return "amdgpu/ip_discovery.bin";
+ }
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
- if (!adev->mman.discovery_bin)
+ adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery.bin)
return -ENOMEM;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+ discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
drm_dbg(&adev->ddev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
+ fw_name);
if (r)
goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
- r = amdgpu_discovery_read_binary_from_mem(
- adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- size, checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
@@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ (struct ip_discovery_header *)(discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le16_to_cpu(ihdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le16_to_cpu(ihdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+ (struct gpu_info_header *)(discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
@@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(ghdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(ghdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+ (struct harvest_info_header *)(discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
@@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
@@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+ (struct vcn_info_header *)(discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
@@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
@@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (0 && offset) {
struct mall_info_header *mhdr =
- (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+ (struct mall_info_header *)(discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
@@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
@@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
if ((amdgpu_discovery != 2) &&
(RREG32(mmIP_DISCOVERY_VERSION) == 4))
amdgpu_ras_query_boot_status(adev, 4);
@@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
}
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
@@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
@@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint8_t inst;
int i, j;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin +
- ip_offset);
+ ip = (struct ip *)(discovery_bin + ip_offset);
inst = ip->number_instance;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
@@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
@@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
return;
}
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+ harvest_info = (struct harvest_table *)(discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
@@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj)
kobj);
struct amdgpu_device *adev = ip_top->adev;
- adev->ip_top = NULL;
kfree(ip_top);
+ adev->discovery.ip_top = NULL;
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
@@ -1062,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
int ii, jj, kk, res;
uint16_t hw_id;
uint8_t inst;
@@ -1079,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
@@ -1166,17 +1204,20 @@ next_ip:
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct kset *die_kset = &adev->ip_top->die_kset;
+ struct kset *die_kset = &ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1185,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1219,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_top *ip_top;
struct kset *die_kset;
int res, ii;
- if (!adev->mman.discovery_bin)
+ if (!discovery_bin)
return -EINVAL;
- adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
- if (!adev->ip_top)
+ ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
+ if (!ip_top)
return -ENOMEM;
- adev->ip_top->adev = adev;
-
- res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ ip_top->adev = adev;
+ adev->discovery.ip_top = ip_top;
+ res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
- die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.parent = &ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
- res = kset_register(&adev->ip_top->die_kset);
+ res = kset_register(&ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
@@ -1256,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
return res;
Err:
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->kobj);
return res;
}
@@ -1301,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
struct list_head *el, *tmp;
struct kset *die_kset;
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
@@ -1313,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
- kobject_put(&adev->ip_top->die_kset.kobj);
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->die_kset.kobj);
+ kobject_put(&ip_top->kobj);
}
/* ================================================== */
@@ -1325,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
+ uint8_t *discovery_bin;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
@@ -1340,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
r = amdgpu_discovery_init(adev);
if (r)
return r;
-
+ discovery_bin = adev->discovery.bin;
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1369,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
@@ -1519,16 +1565,16 @@ next_ip:
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
uint16_t offset, ihdr_ver;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- offset);
+ ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1575,22 +1621,23 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
- gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+ gc_info = (union gc_info *)(discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
@@ -1683,24 +1730,25 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
- mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+ mall_info = (union mall_info *)(discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
@@ -1739,12 +1787,13 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
@@ -1759,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
- vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+ vcn_info = (union vcn_info *)(discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
@@ -1825,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
@@ -1841,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return r;
nps_info = &nps_data;
} else {
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
dev_err(adev->dev,
"fetch mem range failed, ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
if (!offset)
@@ -1857,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
return -ENOENT;
- nps_info =
- (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info = (union nps_info *)(discovery_bin + offset);
}
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
@@ -2361,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_version(adev, SDMA0_HWIP, 0));
return -EINVAL;
}
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -3141,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_ras_ip_blocks(adev);
+ if (r)
+ return r;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index b44d56465c5b..4ce04486cc31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,9 +24,21 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#include <linux/debugfs.h>
+
#define DISCOVERY_TMR_SIZE (10 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
+struct ip_discovery_top;
+
+struct amdgpu_discovery_info {
+ struct debugfs_blob_wrapper debugfs_blob;
+ struct ip_discovery_top *ip_top;
+ uint32_t size;
+ uint8_t *bin;
+ bool reserve_tmr;
+};
+
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
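
This struct gathers the discovery state that previously lived as loose fields on other objects (adev->mman.discovery_bin / discovery_tmr_size, adev->ip_top, the debugfs blob), which is what drives the mechanical renames throughout this diff. A hypothetical helper, sketched only to show the new field names side by side with the old ones:

    /* Hypothetical helper; field names from struct amdgpu_discovery_info above. */
    static bool amdgpu_discovery_bin_present(struct amdgpu_device *adev)
    {
        return adev->discovery.bin &&                              /* was adev->mman.discovery_bin     */
               adev->discovery.size >= sizeof(struct binary_header); /* was ...discovery_tmr_size      */
    }
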
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 51bab32fd8c6..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
@@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property allows the compositor to
+ * directly control the adaptive backlight modulation power savings feature
+ * that is part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property is set to 'sysfs' by default, which allows the sysfs file
+ * 'panel_power_savings' to control it.
+ * If set to 'off' the compositor will ensure it stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' will control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+ const struct drm_prop_enum_list props[] = {
+ { ABM_SYSFS_CONTROL, "sysfs" },
+ { ABM_LEVEL_OFF, "off" },
+ { ABM_LEVEL_MIN, "min" },
+ { ABM_LEVEL_BIAS_MIN, "bias min" },
+ { ABM_LEVEL_BIAS_MAX, "bias max" },
+ { ABM_LEVEL_MAX, "max" },
+ };
+ struct drm_property *prop;
+ int i;
+
+ if (!adev->dc_enabled)
+ return 0;
+
+ prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+ "adaptive backlight modulation",
+ 6);
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(adev_to_drm(adev), prop);
+
+ return ret;
+ }
+ }
+
+ adev->mode_info.abm_level_property = prop;
+
+ return 0;
+}
+
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- return 0;
+ return amdgpu_display_setup_abm_prop(adev);
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 930c171473b4..49a29bf47a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb);
+#define ABM_SYSFS_CONTROL -1
+#define ABM_LEVEL_OFF 0
+#define ABM_LEVEL_MIN 1
+#define ABM_LEVEL_BIAS_MIN 2
+#define ABM_LEVEL_BIAS_MAX 3
+#define ABM_LEVEL_MAX 4
+
#endif
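
For context, the defines above back the enum values registered by amdgpu_display_setup_abm_prop(): ABM_SYSFS_CONTROL keeps the legacy panel_power_savings sysfs attribute in charge, while ABM_LEVEL_OFF through ABM_LEVEL_MAX select a fixed intensity. A hedged sketch of how the property would typically be attached to an eDP connector; the attach site is not part of this diff and the connector variable is hypothetical:

    /* Sketch: attach the new property with 'sysfs' as the default value. */
    if (adev->mode_info.abm_level_property)
        drm_object_attach_property(&connector->base,
                                   adev->mode_info.abm_level_property,
                                   ABM_SYSFS_CONTROL);
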
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 61268aa82df4..3776901bbb1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -354,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
- * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
- * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
+ * The format can be [single value] for setting all timeouts at once or
+ * [GFX,Compute,SDMA,Video] to set individual timeouts.
+ * Negative values mean infinity.
*
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- * The second one is for Compute. The third and fourth ones are
- * for SDMA and Video.
- *
- * By default(with no lockup_timeout settings), the timeout for all jobs is 10000.
+ * By default (with no lockup_timeout settings), the timeout for all queues is 2000.
*/
MODULE_PARM_DESC(lockup_timeout,
- "GPU lockup timeout in ms (default: 10000 for all jobs. "
- "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
-module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+ "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout,
+ sizeof(amdgpu_lockup_timeout), 0444);
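
As a usage illustration of the format described above: booting with amdgpu.lockup_timeout=10000 now applies 10 s to all four queues, amdgpu.lockup_timeout=10000,60000,10000,10000 keeps GFX/SDMA/Video at 10 s while giving Compute 60 s, a 0 entry leaves that queue at the new 2 s default, and a negative entry disables the timeout for that queue (and taints the kernel, as before).
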
/**
* DOC: dpm (int)
@@ -2234,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
adev->pdev->bus->number, i);
if (p) {
pm_runtime_get_sync(&p->dev);
- pm_runtime_mark_last_busy(&p->dev);
pm_runtime_put_autosuspend(&p->dev);
pci_dev_put(p);
}
@@ -2480,7 +2473,6 @@ retry_init:
pm_runtime_allow(ddev->dev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
@@ -2564,7 +2556,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- amdgpu_device_ip_suspend(adev);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -2777,22 +2770,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- ret = -EBUSY;
- goto done;
- }
- }
-done:
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
}
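The simplified idle check above relies on every active user queue being registered in adev->userq_doorbell_xa. A hedged sketch of the registration side, assuming it happens at queue-create time (the exact call site is outside this hunk):

	/* sketch: publish the queue under its doorbell index so that
	 * xa_empty() in the runtime-idle check sees the device as busy */
	int err = xa_err(xa_store_irq(&adev->userq_doorbell_xa,
				      queue->doorbell_index, queue,
				      GFP_KERNEL));
	if (err)
		return err;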
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -2939,7 +2918,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
ret = amdgpu_runtime_idle_check_userq(dev);
done:
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
@@ -2975,7 +2953,6 @@ long amdgpu_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 18a7829122d2..c7843e336310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -45,16 +45,11 @@
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
-static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops ||
- __f->base.ops == &amdgpu_job_fence_ops)
- return __f;
-
- return NULL;
+ return __f;
}
/**
@@ -98,51 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @f: resulting fence object
* @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
- struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- if (!af) {
- /* create a separate hw fence */
- am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
- if (!am_fence)
- return -ENOMEM;
- } else {
- am_fence = af;
- }
- fence = &am_fence->base;
- am_fence->ring = ring;
+ fence = &af->base;
+ af->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- am_fence->seq = seq;
- if (af) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- amdgpu_fence_save_wptr(fence);
+ amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -167,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = fence;
-
return 0;
}
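With the separate job-fence ops gone, every caller now owns the struct amdgpu_fence and derives the dma_fence from it. A minimal caller sketch (the allocation strategy and error unwinding here are assumptions; amdgpu_ib_schedule() further down in this diff is the real user):

	struct amdgpu_fence *af;
	struct dma_fence *f;
	int r;

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af)
		return -ENOMEM;

	r = amdgpu_fence_emit(ring, af, 0);
	if (r)
		return r;	/* error unwinding elided in this sketch */

	f = &af->base;	/* the dma_fence embedded in the amdgpu_fence */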
@@ -276,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
@@ -670,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
- *
- * @ring: fence of the ring to be cleared
- *
- */
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
-{
- int i;
- struct dma_fence *old, **ptr;
-
- for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
- ptr = &ring->fence_drv.fences[i];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && old->ops == &amdgpu_job_fence_ops) {
- struct amdgpu_job *job;
-
- /* For non-scheduler bad job, i.e. failed ib test, we need to signal
- * it right here or we won't be able to track them in fence_drv
- * and they will remain unsignaled during sa_bo free.
- */
- job = container_of(old, struct amdgpu_job, hw_fence.base);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
- dma_fence_signal(old);
- RCU_INIT_POINTER(*ptr, NULL);
- dma_fence_put(old);
- }
- }
-}
-
-/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
@@ -755,7 +698,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
/**
* amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
*
- * @fence: fence of the ring to signal
+ * @af: fence of the ring to signal
*
*/
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
@@ -792,15 +735,13 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
/* signal the guilty fence */
- amdgpu_fence_write(ring, af->seq);
+ amdgpu_fence_write(ring, (u32)af->base.seqno);
amdgpu_fence_process(ring);
}
-void amdgpu_fence_save_wptr(struct dma_fence *fence)
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
{
- struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
-
- am_fence->wptr = am_fence->ring->wptr;
+ af->wptr = af->ring->wptr;
}
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
@@ -866,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
return (const char *)to_amdgpu_fence(f)->ring->name;
}
-static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- return (const char *)to_amdgpu_ring(job->base.sched)->name;
-}
-
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
@@ -890,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
}
/**
- * amdgpu_job_fence_enable_signaling - enable signalling on job fence
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_enable_signaling above, it
- * only handles the job embedded fence.
- */
-static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
-
- return true;
-}
-
-/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -922,21 +839,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
}
/**
- * amdgpu_job_fence_free - free up the job with embedded fence
- *
- * @rcu: RCU callback head
- *
- * Free up the job with embedded fence after the RCU grace period.
- */
-static void amdgpu_job_fence_free(struct rcu_head *rcu)
-{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-
- /* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence.base));
-}
-
-/**
* amdgpu_fence_release - callback that fence can be freed
*
* @f: fence
@@ -949,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
-/**
- * amdgpu_job_fence_release - callback that job embedded fence can be freed
- *
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_release above, it
- * only handles the job embedded fence.
- */
-static void amdgpu_job_fence_release(struct dma_fence *f)
-{
- call_rcu(&f->rcu, amdgpu_job_fence_free);
-}
-
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -969,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
-static const struct dma_fence_ops amdgpu_job_fence_ops = {
- .get_driver_name = amdgpu_fence_get_driver_name,
- .get_timeline_name = amdgpu_job_fence_get_timeline_name,
- .enable_signaling = amdgpu_job_fence_enable_signaling,
- .release = amdgpu_job_fence_release,
-};
-
/*
* Fence debugfs
*/
@@ -1045,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
*val = atomic_read(&adev->reset_domain->reset_res);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b2033f8352f5..83f3b94ed975 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
- unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
@@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++) {
+ for (i = 0; i < pages; i++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 094c508d3d44..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, &range);
- if (r)
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ r = -ENOMEM;
+ goto release_object;
+ }
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ amdgpu_hmm_range_free(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
@@ -597,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ebe2b4c68b0f..8b118c53f351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
@@ -1194,6 +1195,75 @@ failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_hdp_flush(adev);
+
+ if (!ring->funcs->emit_hdp_flush)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't wait any longer in the GPU reset case, because doing so may
+ * block the gpu_recover() routine forever, e.g. when this KIQ access
+ * is triggered from TTM and ttm_bo_lock_delayed_workqueue() never
+ * returns while we keep waiting here, which makes gpu_recover() hang.
+ *
+ * Also don't wait any longer when called from IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_hdp_flush;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_hdp_flush;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+ dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+ return r < 0 ? r : -EIO;
+}
+
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
if (amdgpu_num_kcq == -1) {
@@ -1600,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -2485,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
&amdgpu_debugfs_compute_sched_mask_fops);
#endif
}
+
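amdgpu_kiq_hdp_flush() gives callers a single entry point that prefers the MES path when the MES KIQ ring is ready and otherwise emits the flush on the KIQ ring. A hypothetical call site might look like this (the caller name is an assumption, not part of this patch):

static void example_flush_hdp(struct amdgpu_device *adev)
{
	int r = amdgpu_kiq_hdp_flush(adev);

	if (r == -EOPNOTSUPP)
		dev_dbg(adev->dev, "ring cannot emit an HDP flush\n");
	else if (r)
		dev_warn(adev->dev, "HDP flush via KIQ/MES failed (%d)\n", r);
}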
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index fb5f7a0ee029..efd61a1ccc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 2c6a6b858112..518ca3f4db2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -168,17 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range)
+ struct amdgpu_hmm_range *range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
unsigned long *pfns;
int r = 0;
-
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
@@ -221,28 +217,79 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
+ hmm_range->hmm_pfns = NULL;
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+/**
+ * amdgpu_hmm_range_valid - check if an HMM range is still valid
+ * @range: pointer to the &struct amdgpu_hmm_range to validate
+ *
+ * Determines whether the given HMM range @range is still valid by
+ * checking for invalidations via the MMU notifier sequence. This is
+ * typically used to verify that the range has not been invalidated
+ * by concurrent address space updates before it is accessed.
+ *
+ * Return:
+ * * true if @range is valid and can be used safely
+ * * false if @range is NULL or has been invalidated
+ */
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- bool r;
+ if (!range)
+ return false;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
+}
- return r;
+/**
+ * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
+ * @bo: optional buffer object to associate with this HMM range
+ *
+ * Allocates a struct amdgpu_hmm_range and associates it with the given @bo.
+ * The reference count of the @bo is incremented.
+ *
+ * Return:
+ * Pointer to a newly allocated struct amdgpu_hmm_range on success,
+ * or NULL if memory allocation fails.
+ */
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
+}
+
+/**
+ * amdgpu_hmm_range_free - release an AMDGPU HMM range
+ * @range: pointer to the range object to free
+ *
+ * Releases all resources held by @range, including the associated
+ * hmm_pfns, and drops the reference on the associated bo, if any.
+ *
+ * Return: void
+ */
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
+{
+ if (!range)
+ return;
+
+ kvfree(range->hmm_range.hmm_pfns);
+
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
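Taken together, the new helpers give a simple lifecycle: allocate the wrapper, fault the pages, check validity, free. A hedged end-to-end sketch (error handling trimmed; start, npages, readonly and owner are placeholder names, and the notifier argument mirrors the existing amdgpu_hmm_range_get_pages() signature):

	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(bo);	/* takes a bo reference */
	if (!range)
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
				       readonly, owner, range);
	if (r)
		goto out_free;

	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;	/* invalidated concurrently, caller retries */

out_free:
	amdgpu_hmm_range_free(range);	/* frees pfns and drops the bo reference */
	return r;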
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index 953e1d06de20..140bc9cd57b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,13 +31,20 @@
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
@@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
+{
+ return false;
+}
+
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7d9bcb72e8dd..39229ece83f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,17 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job) {
vm = job->vm;
fence_ctx = job->base.s_fence ?
- job->base.s_fence->scheduled.context : 0;
+ job->base.s_fence->finished.context : 0;
shadow_va = job->shadow_va;
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
- af = &job->hw_fence;
+ af = job->hw_fence;
/* Save the context of the job for reset handling.
* The driver needs this so it can skip the ring
* contents for guilty contexts.
*/
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ af->context = fence_ctx;
+ /* the vm fence is also part of the job's context */
+ job->hw_vm_fence->context = fence_ctx;
} else {
vm = NULL;
fence_ctx = 0;
@@ -167,7 +169,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
- af = NULL;
+ af = kzalloc(sizeof(*af), GFP_ATOMIC);
+ if (!af)
+ return -ENOMEM;
}
if (!ring->sched.ready) {
@@ -289,7 +293,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, af, fence_flags);
+ r = amdgpu_fence_emit(ring, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -297,6 +301,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_undo(ring);
return r;
}
+ *f = &af->base;
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
@@ -317,7 +322,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
* fence so we know what rings contents to backup
* after we reset the queue.
*/
- amdgpu_fence_save_wptr(*f);
+ amdgpu_fence_save_wptr(af);
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d020a890a0ea..e08d837668f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -137,7 +137,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
- r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
if (!r) {
atomic_inc(&ring->adev->gpu_reset_counter);
dev_err(adev->dev, "Ring %s reset succeeded\n",
@@ -186,6 +186,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned int num_ibs, struct amdgpu_job **job,
u64 drm_client_id)
{
+ struct amdgpu_fence *af;
+ int r;
+
if (num_ibs == 0)
return -EINVAL;
@@ -193,6 +196,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -204,6 +221,13 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return drm_sched_job_init(&(*job)->base, entity, 1, owner,
drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+
+ return r;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -251,11 +275,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* Check if any fences where initialized */
+ /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.base.ops)
- f = &job->hw_fence.base;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
else
f = NULL;
@@ -271,11 +295,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
- /* only put the hw fence if has embedded fence */
- if (!job->hw_fence.base.ops)
- kfree(job);
- else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -304,10 +324,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.base.ops)
- kfree(job);
- else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 4a6487eb6cb5..7abf069d17d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -64,7 +64,8 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct amdgpu_fence hw_fence;
+ struct amdgpu_fence *hw_fence;
+ struct amdgpu_fence *hw_vm_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b3e6b3fcdf2c..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1471,7 +1471,6 @@ error_pasid:
kfree(fpriv);
out_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
@@ -1539,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 4883adcfbb4b..9c182ce501af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
- adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+ adev->mes.vmid_mask_mmhub = 0xFF00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xFFFE : 0xFF00;
num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
@@ -528,6 +528,18 @@ error:
return r;
}
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
+{
+ uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+
+ hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+ hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+ ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+ return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+ ref_and_mask, ref_and_mask);
+}
+
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 97c137c90f97..e989225b354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -239,6 +239,7 @@ struct mes_add_queue_input {
struct mes_remove_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
+ bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
@@ -428,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 20460cfd09bc..dc8d2f52c7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -326,6 +326,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
const struct drm_edid *bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 656b8a931dae..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
* if non-zero, cannot unmap from GPU because user queues may still access it
*/
unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 123bcf5c2bb1..bacf888735db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
}
amdgpu_gfx_off_ctrl(adev, true);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e0ee21150860..c8b4dd3ea5c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -41,6 +41,7 @@
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras_mgr.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -611,6 +612,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
+
/**
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
*
@@ -635,6 +638,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
(struct amdgpu_device *)file_inode(f)->i_private;
int ret;
+ if (amdgpu_uniras_enabled(adev)) {
+ ret = amdgpu_uniras_clear_badpages_info(adev);
+ return ret ? ret : size;
+ }
+
ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
@@ -1542,9 +1550,51 @@ out_fini_err_data:
return ret;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
+{
+ struct ras_cmd_dev_handle req = {0};
+ int ret;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ &req, sizeof(req), NULL, 0);
+ if (ret) {
+ dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_cmd_block_ecc_info_req req = {0};
+ struct ras_cmd_block_ecc_info_rsp rsp = {0};
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ req.block_id = info->head.block;
+ req.subblock_id = info->head.sub_block_index;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ if (!ret) {
+ info->ce_count = rsp.ce_count;
+ info->ue_count = rsp.ue_count;
+ info->de_count = rsp.de_count;
+ }
+
+ return ret;
+}
+
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
- return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_query_block_ecc(adev, info);
+ else
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
@@ -1596,6 +1646,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_cmd_inject_error_req inject_req;
+ struct ras_cmd_inject_error_rsp rsp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&inject_req, 0, sizeof(inject_req));
+ inject_req.block_id = info->head.block;
+ inject_req.subblock_id = info->head.sub_block_index;
+ inject_req.address = info->address;
+ inject_req.error_type = info->head.type;
+ inject_req.instance_mask = info->instance_mask;
+ inject_req.value = info->value;
+
+ return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
+ &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -1613,6 +1684,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_error_inject(adev, info);
+
/* inject on guest isn't allowed, return success directly */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1757,7 +1831,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count);
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
@@ -1815,19 +1891,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
unsigned int end = div64_ul(ppos + count - 1, element_size);
ssize_t s = 0;
struct ras_badpage *bps = NULL;
- unsigned int bps_count = 0;
+ int bps_count = 0, i, status;
+ uint64_t address;
memset(buf, 0, count);
- if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ bps_count = end - start;
+ bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
return 0;
- for (; start < end && start < bps_count; start++)
+ memset(bps, 0, sizeof(*bps) * bps_count);
+
+ if (amdgpu_uniras_enabled(adev))
+ bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
+ else
+ bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
+
+ if (bps_count <= 0) {
+ kfree(bps);
+ return 0;
+ }
+
+ for (i = 0; i < bps_count; i++) {
+ address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
+ if (amdgpu_ras_check_critical_address(adev, address))
+ continue;
+
+ bps[i].size = AMDGPU_GPU_PAGE_SIZE;
+
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ address);
+ if (status == -EBUSY)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ else
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
+
s += scnprintf(&buf[s], element_size + 1,
"0x%08x : 0x%08x : %1s\n",
- bps[start].bp,
- bps[start].size,
- amdgpu_ras_badpage_flags_str(bps[start].flags));
+ bps[i].bp,
+ bps[i].size,
+ amdgpu_ras_badpage_flags_str(bps[i].flags));
+ }
kfree(bps);
@@ -1843,12 +1950,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}
+static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
+ u32 *minor, u32 *rev)
+{
+ int i;
+
+ if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
+ return false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
+ *major = adev->ip_blocks[i].version->major;
+ *minor = adev->ip_blocks[i].version->minor;
+ *rev = adev->ip_blocks[i].version->rev;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, version_attr);
- return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+ u32 major, minor, rev;
+ ssize_t size = 0;
+
+ size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
+ con->eeprom_control.tbl_hdr.version);
+
+ if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
+ size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
+ major, minor, rev);
+
+ return size;
}
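With a UNIRAS-capable RAS IP block registered, reading this sysfs node now yields two lines, e.g. a "table version: 0x..." line followed by something like "ras version: 1.0.0" (version numbers illustrative); without it only the table version line is emitted, as before.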
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
@@ -2241,6 +2378,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
return;
+ if (amdgpu_uniras_enabled(adev)) {
+ amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
+ return;
+ }
+
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
@@ -2411,6 +2553,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_ih_data *data;
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = info->head.block;
+ memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
+
+ return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
+ }
+
obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
@@ -2605,62 +2757,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
}
}
-/* recovery begin */
-
-/* return 0 on success.
- * caller need free bps.
- */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count)
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = 0;
- int ret = 0, status;
+ int r = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data || data->count == 0) {
- *bps = NULL;
- ret = -EINVAL;
- goto out;
+ if (start < data->count) {
+ for (i = start; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
+ bps[r].bp = data->bps[i].retired_page;
+ r++;
+ if (r >= count)
+ break;
+ }
}
+ mutex_unlock(&con->recovery_lock);
- *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
- if (!*bps) {
- ret = -ENOMEM;
- goto out;
- }
+ return r;
+}
- for (; i < data->count; i++) {
- if (!data->bps[i].ts)
- continue;
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
+{
+ struct ras_cmd_bad_pages_info_req cmd_input;
+ struct ras_cmd_bad_pages_info_rsp *output;
+ uint32_t group, start_group, end_group;
+ uint32_t pos, pos_in_group;
+ int r = 0, i;
- (*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].retired_page,
- .size = AMDGPU_GPU_PAGE_SIZE,
- .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
- };
+ if (!bps || !count)
+ return -EINVAL;
- if (amdgpu_ras_check_critical_address(adev,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
+ output = kmalloc(sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
- if (status == -EBUSY)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (status == -ENOENT)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ memset(&cmd_input, 0, sizeof(cmd_input));
+
+ start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
+ RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+
+ pos = start;
+ for (group = start_group; group < end_group; group++) {
+ memset(output, 0, sizeof(*output));
+ cmd_input.group_index = group;
+ if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
+ &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
+ goto out;
+
+ if (pos >= output->bp_total_cnt)
+ goto out;
+
+ pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
+ if (!output->records[i].ts)
+ continue;
+
+ bps[r].bp = output->records[i].retired_page;
+ r++;
+ if (r >= count)
+ goto out;
+ }
}
- *count = con->bad_page_num;
out:
- mutex_unlock(&con->recovery_lock);
- return ret;
+ kfree(output);
+ return r;
}
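As a worked example, assuming a hypothetical RAS_CMD_MAX_BAD_PAGES_PER_GROUP of 8, a read with start = 10 and count = 20 computes start_group = 1 and end_group = 4, so groups 1 through 3 are fetched from the RAS manager; in group 1 the cursor starts at pos_in_group = 2, skipping the first two records already consumed by earlier reads.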
static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
@@ -3126,7 +3299,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
*new_cnt = unit_num;
/* only new entries are saved */
- if (unit_num > 0) {
+ if (unit_num && save_count) {
/*old asics only save pa to eeprom like before*/
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
@@ -3590,6 +3763,9 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!con || amdgpu_sriov_vf(adev))
return 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
control = &con->eeprom_control;
ret = amdgpu_ras_eeprom_init(control);
control->is_eeprom_valid = !ret;
@@ -3975,7 +4151,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
atomic_set(&con->ras_ue_count, ue_count);
}
- pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
@@ -4584,6 +4759,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
struct ras_event_state *event_state;
int ret = 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
if (type >= RAS_EVENT_TYPE_COUNT) {
ret = -EINVAL;
goto out;
@@ -4634,20 +4812,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type
return id;
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
- u64 event_id;
+ u64 event_id = RAS_EVENT_INVALID_ID;
- if (amdgpu_ras_mark_ras_event(adev, type)) {
- dev_err(adev->dev,
- "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
- return;
- }
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
- event_id = amdgpu_ras_acquire_event_id(adev, type);
+ if (!amdgpu_ras_mark_ras_event(adev, type))
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4656,6 +4832,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
+
+ return -EBUSY;
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
@@ -5408,6 +5586,9 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_is_rma(adev);
+
if (!con)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6cf0dfd38be8..556cf4d7b5ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -504,6 +504,7 @@ struct ras_critical_region {
};
struct amdgpu_ras {
+ void *ras_mgr;
/* ras infrastructure */
/* for ras itself. */
uint32_t features;
@@ -909,7 +910,7 @@ static inline void amdgpu_ras_intr_cleared(void)
atomic_set(&amdgpu_ras_in_intr, 0);
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 3eb3fb55ccb0..5a7bf0661dbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* These are memory addresses as would be seen by one or more EEPROM
* chips strung on the I2C bus, usually by manipulating pins 1-3 of a
@@ -556,6 +557,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev);
+
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5ec5c3ff22bb..43f769fed810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -159,8 +159,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->funcs->align_mask)
- ib->ptr[ib->length_dw++] = ring->funcs->nop;
+ u32 align_mask = ring->funcs->align_mask;
+ u32 count = ib->length_dw & align_mask;
+
+ if (count) {
+ count = align_mask + 1 - count;
+
+ memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
+
+ ib->length_dw += count;
+ }
}
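For example, with an align_mask of 7 (8-dword alignment) and ib->length_dw of 13, count is first 13 & 7 = 5 and then 8 - 5 = 3, so three NOP dwords are written with a single memset32() instead of three separate stores.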
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4b46e3c26ff3..87b962df5460 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -147,16 +147,14 @@ struct amdgpu_fence {
u64 wptr;
/* fence context for resets */
u64 context;
- uint32_t seq;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
-void amdgpu_fence_save_wptr(struct dma_fence *fence);
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -166,8 +164,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 41ebe690eeff..3739be1b71e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
dev_err(adev->dev, "Invalid input: %s\n", str);
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8e82163981f4..9777c5c9cb26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -286,12 +286,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
* move and different for a BO to BO copy.
*
*/
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f)
+__attribute__((nonnull))
+static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
@@ -365,9 +366,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
- if (f)
- *f = dma_fence_get(fence);
- dma_fence_put(fence);
+ *f = fence;
return r;
}
@@ -706,10 +705,11 @@ struct amdgpu_ttm_tt {
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that @range points to valid memory and that it is freed afterwards.
*/
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -719,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
@@ -756,38 +753,6 @@ out_unlock:
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
@@ -797,12 +762,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
/*
@@ -1804,18 +1769,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- if (!adev->gmc.is_app_apu) {
- ret = amdgpu_bo_create_kernel_at(
- adev, adev->gmc.real_vram_size - reserve_size,
- reserve_size, &adev->mman.fw_reserved_memory, NULL);
- if (ret) {
- dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
- NULL, NULL);
- return ret;
- }
- } else {
- DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
+ &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ return ret;
}
return 0;
@@ -1983,19 +1944,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- *The reserved vram for driver must be pinned to the specified
- *place on the VRAM, so reserve it early.
+ * The reserved VRAM for the driver must be pinned to a specific
+ * location in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
- * only NAVI10 and onwards ASIC support for IP discovery.
- * If IP discovery enabled, a block of memory should be
- * reserved for IP discovey.
+ * only NAVI10 and later ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for it.
*/
- if (adev->mman.discovery_bin) {
+ if (adev->discovery.reserve_tmr) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0be2728aa872..577ee04ce0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -28,6 +28,7 @@
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -82,9 +83,6 @@ struct amdgpu_mman {
uint64_t stolen_reserved_offset;
uint64_t stolen_reserved_size;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
struct amdgpu_bo *fw_reserved_memory_extend;
@@ -170,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
@@ -192,29 +184,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 1add21160d21..13cc5a686dfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -29,6 +29,8 @@
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
+#include "amdgpu_reset.h"
#include "amdgpu_userq_fence.h"
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
@@ -44,10 +46,29 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
- u64 expected_size)
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+ struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+ struct amdgpu_userq_va_cursor *va_cursor;
+
+ va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+ if (!va_cursor)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&va_cursor->list);
+ va_cursor->gpu_addr = addr;
+ atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+ list_add(&va_cursor->list, &queue->userq_va_list);
+
+ return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size)
{
struct amdgpu_bo_va_mapping *va_map;
+ struct amdgpu_vm *vm = queue->vm;
u64 user_addr;
u64 size;
int r = 0;
@@ -67,6 +88,7 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
/* Only validate the userq whether resident in the VM mapping range */
if (user_addr >= va_map->start &&
va_map->last - user_addr + 1 >= size) {
+ amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
amdgpu_bo_unreserve(vm->root.bo);
return 0;
}
@@ -77,6 +99,76 @@ out_err:
return r;
}
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ bool r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
+ r = true;
+ else
+ r = false;
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ return r;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ int r = 0;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+ dev_dbg(queue->userq_mgr->adev->dev,
+ "validate the userq mapping:%p va:%llx r:%d\n",
+ queue, va_cursor->gpu_addr, r);
+ }
+
+ if (r != 0)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_userq_va_cursor *va_cursor)
+{
+ atomic_set(&mapping->bo_va->userq_va_mapped, 0);
+ list_del(&va_cursor->list);
+ kfree(va_cursor);
+}
+
+static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
+ if (!mapping) {
+ r = -EINVAL;
+ goto err;
+ }
+ dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
+ queue, va_cursor->gpu_addr);
+ amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
+ }
+err:
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ return r;
+}
+
static int
amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
@@ -159,19 +251,24 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
return r;
}
-static void
+static int
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct dma_fence *f = queue->last_fence;
- int ret;
+ int ret = 0;
if (f && !dma_fence_is_signaled(f)) {
- ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
- if (ret <= 0)
+ ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ return -ETIME;
+ }
}
+
+ return ret;
}
static void
@@ -182,16 +279,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ /* Drop the queue's tracked buffer VA entries. */
+ amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);
- idr_remove(&uq_mgr->userq_idr, queue_id);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
+ list_del(&queue->userq_va_list);
kfree(queue);
+
+ up_read(&adev->reset_domain->sem);
}
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
- return idr_find(&uq_mgr->userq_idr, qid);
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
}
void
@@ -319,17 +427,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
case AMDGPU_HW_IP_DMA:
db_size = sizeof(u64);
break;
-
- case AMDGPU_HW_IP_VCN_ENC:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
- break;
-
- case AMDGPU_HW_IP_VPE:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
- break;
-
default:
drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
db_info->queue_type);
@@ -391,7 +488,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -463,8 +559,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
struct amdgpu_db_info db_info;
char *queue_name;
bool skip_map_queue;
+ u32 qid;
uint64_t index;
- int qid, r = 0;
+ int r = 0;
int priority =
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
@@ -487,7 +584,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
- mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
@@ -505,14 +601,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
- /* Validate the userq virtual address.*/
- if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
- amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
- amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
- r = -EINVAL;
- kfree(queue);
- goto unlock;
- }
+ INIT_LIST_HEAD(&queue->userq_va_list);
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -523,6 +612,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
db_info.db_obj = &queue->db_obj;
db_info.doorbell_offset = args->in.doorbell_offset;
+ /* Validate the userq virtual address.*/
+ if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
@@ -548,16 +646,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
- qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
- if (qid < 0) {
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
goto unlock;
}
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
@@ -570,7 +679,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- idr_remove(&uq_mgr->userq_idr, qid);
+ xa_erase(&uq_mgr->userq_mgr_xa, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -595,7 +704,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
return r;
}
@@ -693,11 +801,19 @@ static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+
+ if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+ drm_file_err(uq_mgr->file,
+ "trying restore queue without va mapping\n");
+ queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+ continue;
+ }
+
r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
@@ -760,12 +876,21 @@ static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ bool invalidated = false, new_addition = false;
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_hmm_range *range;
struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned long key, tmp_key;
struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
struct drm_exec exec;
+ struct xarray xa;
int ret;
+ xa_init(&xa);
+
+retry_lock:
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
@@ -792,10 +917,72 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
goto unlock_all;
}
+ if (invalidated) {
+ xa_for_each(&xa, tmp_key, range) {
+ bo = range->bo;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+ }
+ invalidated = false;
+ }
+
ret = amdgpu_vm_handle_moved(adev, vm, NULL);
if (ret)
goto unlock_all;
+ key = 0;
+ /* Validate User Ptr BOs */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+
+ if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+ continue;
+
+ range = xa_load(&xa, key);
+ if (range && range->bo != bo) {
+ xa_erase(&xa, key);
+ amdgpu_hmm_range_free(range);
+ range = NULL;
+ }
+
+ if (!range) {
+ range = amdgpu_hmm_range_alloc(bo);
+ if (!range) {
+ ret = -ENOMEM;
+ goto unlock_all;
+ }
+
+ xa_store(&xa, key, range, GFP_KERNEL);
+ new_addition = true;
+ }
+ key++;
+ }
+
+ if (new_addition) {
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (ret)
+ goto unlock_all;
+ }
+
+ invalidated = true;
+ new_addition = false;
+ goto retry_lock;
+ }
+
ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
goto unlock_all;
@@ -815,6 +1002,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
unlock_all:
drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ amdgpu_hmm_range_free(range);
+ }
+ xa_destroy(&xa);
return ret;
}
@@ -848,11 +1042,11 @@ static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Try to unmap all the queues in this process ctx */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
@@ -867,9 +1061,10 @@ static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id, ret;
+ unsigned long queue_id;
+ int ret;
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f))
@@ -922,44 +1117,30 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
struct amdgpu_device *adev)
{
mutex_init(&userq_mgr->userq_mutex);
- idr_init_base(&userq_mgr->userq_idr, 1);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
userq_mgr->adev = adev;
userq_mgr->file = file_priv;
- mutex_lock(&adev->userq_mutex);
- list_add(&userq_mgr->list, &adev->userq_mgr_list);
- mutex_unlock(&adev->userq_mutex);
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
return 0;
}
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
- struct amdgpu_device *adev = userq_mgr->adev;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- uint32_t queue_id;
+ unsigned long queue_id;
cancel_delayed_work_sync(&userq_mgr->resume_work);
- mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
- idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- if (uqm == userq_mgr) {
- list_del(&uqm->list);
- break;
- }
- }
- idr_destroy(&userq_mgr->userq_idr);
+ xa_destroy(&userq_mgr->userq_mgr_xa);
mutex_unlock(&userq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
@@ -967,57 +1148,50 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_preempt_helper(uqm, queue);
- else
- r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return 0;
}
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_restore_helper(uqm, queue);
- else
- r = amdgpu_userq_map_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+
+ return 0;
}
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
@@ -1025,33 +1199,31 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
adev->userq_halt_for_enforce_isolation = true;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
- r = amdgpu_userq_preempt_helper(uqm, queue);
- if (r)
- ret = r;
- }
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
}
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
@@ -1060,21 +1232,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
adev->userq_halt_for_enforce_isolation = false;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
@@ -1082,9 +1253,39 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (r)
ret = r;
}
- }
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_bo_va *bo_va = mapping->bo_va;
+ struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+ int ret = 0;
+
+ if (!ip_mask)
+ return 0;
+
+ dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
+ /*
+ * The userq VA mapping's reservation should carry the eviction fence.
+ * If that fence cannot signal successfully during unmapping, warn to
+ * flag the improper unmap of a user queue VA.
+ * Note: the eviction fence may be attached to different BOs, and this
+ * unmap covers only one kind of userq VA, so assume at this point that
+ * the eviction fence is still unsignaled.
+ */
+ if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return -EBUSY;
+ }
+
+ return 0;
+}
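
The per-process queue map in this file is now an allocating XArray rather than an IDR.
A minimal sketch of the allocating-XArray pattern it relies on, with illustrative demo_*
names only (not the driver's own structures):

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_queue {
	u32 id;
};

/* Equivalent to xa_init_flags(&xa, XA_FLAGS_ALLOC). */
static DEFINE_XARRAY_ALLOC(demo_xa);

static int demo_queue_add(struct demo_queue *q)
{
	u32 qid;
	int r;

	/* Pick an unused ID in [1, 255] and store the entry under it. */
	r = xa_alloc(&demo_xa, &qid, q, XA_LIMIT(1, 255), GFP_KERNEL);
	if (r)
		return r;

	q->id = qid;
	return 0;
}

static void demo_queue_del(u32 qid)
{
	/* xa_erase() returns the previous entry (or NULL); lookup is xa_load(). */
	kfree(xa_erase(&demo_xa, qid));
}

static void demo_queue_walk(void)
{
	struct demo_queue *q;
	unsigned long qid;	/* xa_for_each() iterates with an unsigned long index */

	xa_for_each(&demo_xa, qid, q)
		pr_info("queue %lu at %p\n", qid, q);
}
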
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index c027dd916672..09da0617bfa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -37,6 +37,7 @@ enum amdgpu_userq_state {
AMDGPU_USERQ_STATE_MAPPED,
AMDGPU_USERQ_STATE_PREEMPTED,
AMDGPU_USERQ_STATE_HUNG,
+ AMDGPU_USERQ_STATE_INVALID_VA,
};
struct amdgpu_mqd_prop;
@@ -47,6 +48,11 @@ struct amdgpu_userq_obj {
struct amdgpu_bo *obj;
};
+struct amdgpu_userq_va_cursor {
+ u64 gpu_addr;
+ struct list_head list;
+};
+
struct amdgpu_usermode_queue {
int queue_type;
enum amdgpu_userq_state state;
@@ -66,6 +72,8 @@ struct amdgpu_usermode_queue {
u32 xcp_id;
int priority;
struct dentry *debugfs_queue;
+
+ struct list_head userq_va_list;
};
struct amdgpu_userq_funcs {
@@ -88,11 +96,15 @@ struct amdgpu_userq_funcs {
/* Usermode queues for gfx */
struct amdgpu_userq_mgr {
- struct idr userq_idr;
+ /**
+ * @userq_mgr_xa: Per-process user queue map (queue ID → queue)
+ * Key: queue_id (unique ID within the process's userq manager)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_mgr_xa;
struct mutex userq_mutex;
struct amdgpu_device *adev;
struct delayed_work resume_work;
- struct list_head list;
struct drm_file *file;
};
@@ -136,7 +148,9 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
-
-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
- u64 expected_size);
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
#endif
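
struct amdgpu_userq_va_cursor above is a plain list node used to remember each buffer VA
a queue has validated. A hedged sketch of the add/cleanup pattern it enables; the
va_list_* and va_cursor names are made up for illustration:

#include <linux/list.h>
#include <linux/slab.h>

struct va_cursor {
	u64 gpu_addr;
	struct list_head list;
};

/* Remember one validated VA on the queue's tracking list. */
static int va_list_track(struct list_head *head, u64 addr)
{
	struct va_cursor *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;

	c->gpu_addr = addr;
	list_add(&c->list, head);
	return 0;
}

/* Drop every tracked VA, e.g. when the queue is destroyed. */
static void va_list_cleanup(struct list_head *head)
{
	struct va_cursor *c, *tmp;

	list_for_each_entry_safe(c, tmp, head, list) {
		list_del(&c->list);
		kfree(c);
	}
}
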
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 761bad98da3e..2aeeaa954882 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -537,7 +537,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
}
/* Retrieve the user queue */
- queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
if (!queue) {
r = -ENOENT;
goto put_gobj_write;
@@ -899,7 +899,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
*/
num_fences = dma_fence_dedup_array(fences, num_fences);
- waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
if (!waitq) {
r = -EINVAL;
goto free_fences;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dc8a17bcc3c8..82624b44e661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -100,7 +100,8 @@
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -161,7 +162,8 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
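
__maybe_unused only marks the variable as possibly unread so the compiler stays quiet;
code generation is unchanged. A small standalone illustration, assuming a hypothetical
CONFIG_DEMO_DEBUG option (not driver code):

#include <linux/compiler_attributes.h>

static int demo(int x)
{
	/* Set unconditionally but only read under the config option; the
	 * attribute silences -Wunused-but-set-variable in the other case. */
	int scratch __maybe_unused;

	scratch = x * 2;
#ifdef CONFIG_DEMO_DEBUG
	return scratch;
#else
	return x;
#endif
}
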
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f96beb96c75c..f2ce8f506aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -44,6 +44,18 @@
vf2pf_info->ucode_info[ucode].version = ver; \
} while (0)
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+
+const char *amdgpu_virt_dynamic_crit_table_name[] = {
+ "IP DISCOVERY",
+ "VBIOS IMG",
+ "RAS TELEMETRY",
+ "DATA EXCHANGE",
+ "BAD PAGE INFO",
+ "INIT HEADER",
+ "LAST",
+};
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
virt->ops->req_init_data(adev);
if (adev->virt.req_init_data_ver > 0)
- DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n",
+ adev->virt.req_init_data_ver);
else
- DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+ dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
}
/**
@@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
- DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
@@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
&bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_dbg(adev->dev,
+ "RAS WARN: reserve vram for retired page %llx fail\n",
+ bp);
data->bps_bo[i] = bo;
}
data->last_reserved = i + 1;
@@ -658,10 +673,34 @@ out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
+static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
+{
+ uint32_t dataexchange_offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
+ uint32_t dataexchange_size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
+ uint64_t pos = 0;
+
+ dev_info(adev->dev,
+ "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ dataexchange_offset, dataexchange_size);
+
+ if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
+ dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n");
+ return -EINVAL;
+ }
+
+ pos = (uint64_t)dataexchange_offset;
+ amdgpu_device_vram_access(adev, pos, pfvf_data,
+ dataexchange_size, false);
+
+ return 0;
+}
+
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
+ dev_info(adev->dev, "clean up the vf2pf work item\n");
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
adev->virt.vf2pf_update_interval_ms = 0;
}
@@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
+ uint32_t *pfvf_data = NULL;
+
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
- DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
@@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ pfvf_data =
+ kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
+ GFP_KERNEL);
+ if (!pfvf_data) {
+ dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
+ return;
+ }
- amdgpu_virt_read_pf2vf_data(adev);
+ if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
+ goto free_pfvf_data;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+free_pfvf_data:
+ kfree(pfvf_data);
+ pfvf_data = NULL;
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
}
}
@@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
if (adev->mman.fw_vram_usage_va) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
+ (AMD_SRIOV_MSG_SIZE_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ }
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
break;
default: /* other chip doesn't support SRIOV */
is_sriov = false;
- DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
@@ -842,6 +921,215 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
adev->virt.ras.cper_rptr = 0;
}
+static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
+{
+ uint32_t sum = 0;
+
+ if (buf_start >= buf_end)
+ return 0;
+
+ for (; buf_start < buf_end; buf_start++)
+ sum += buf_start[0];
+
+ return 0xffffffff - sum;
+}
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
+ u64 init_hdr_offset = adev->virt.init_data_header.offset;
+ u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
+ u64 vram_size;
+ u64 end;
+ int r = 0;
+ uint8_t checksum = 0;
+
+ /* Skip below init if critical region version != v2 */
+ if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
+ return 0;
+
+ if (init_hdr_offset < 0) {
+ dev_err(adev->dev, "Invalid init header offset\n");
+ return -EINVAL;
+ }
+
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ return -EINVAL;
+ vram_size <<= 20;
+
+ if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
+ dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
+ return -EINVAL;
+ }
+
+ /* Allocate for init_data_hdr */
+ init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
+ if (!init_data_hdr)
+ return -ENOMEM;
+
+ amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
+ sizeof(struct amd_sriov_msg_init_data_header), false);
+
+ /* Table validation */
+ if (strncmp(init_data_hdr->signature,
+ AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
+ AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
+ dev_err(adev->dev, "Invalid init data signature: %.4s\n",
+ init_data_hdr->signature);
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum = amdgpu_virt_crit_region_calc_checksum(
+ (uint8_t *)&init_data_hdr->initdata_offset,
+ (uint8_t *)init_data_hdr +
+ sizeof(struct amd_sriov_msg_init_data_header));
+ if (checksum != init_data_hdr->checksum) {
+ dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n",
+ checksum, init_data_hdr->checksum);
+ r = -EINVAL;
+ goto out;
+ }
+
+ memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
+ memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
+
+ adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
+ adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
+
+ /* Validation and initialization for each table entry */
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
+ if (!init_data_hdr->ip_discovery_size_in_kb ||
+ init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
+ init_data_hdr->ip_discovery_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
+ init_data_hdr->ip_discovery_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
+ init_data_hdr->ip_discovery_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
+ if (!init_data_hdr->vbios_img_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
+ init_data_hdr->vbios_img_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
+ init_data_hdr->vbios_img_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
+ init_data_hdr->vbios_img_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
+ if (!init_data_hdr->ras_tele_info_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
+ init_data_hdr->ras_tele_info_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
+ init_data_hdr->ras_tele_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
+ init_data_hdr->ras_tele_info_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
+ if (!init_data_hdr->dataexchange_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
+ init_data_hdr->dataexchange_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
+ init_data_hdr->dataexchange_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
+ init_data_hdr->dataexchange_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
+ if (!init_data_hdr->bad_page_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
+ init_data_hdr->bad_page_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
+ init_data_hdr->bad_page_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
+ init_data_hdr->bad_page_size_in_kb;
+ }
+
+ /* Validation for critical region info */
+ if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* reserved memory starts at the critical region base offset; size comes from the header (typically 5 MB) */
+ adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
+ adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+ dev_info(adev->dev,
+ "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
+ init_data_hdr->version,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size >> 10);
+
+ adev->virt.is_dynamic_crit_regn_enabled = true;
+
+out:
+ kfree(init_data_hdr);
+ init_data_hdr = NULL;
+
+ return r;
+}
+
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size)
+{
+ uint32_t data_offset = 0;
+ uint32_t data_size = 0;
+ enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+ if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+ return -EINVAL;
+
+ data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+ data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+ /* Validate input params */
+ if (!binary || !size || *size < (uint64_t)data_size)
+ return -EINVAL;
+
+ /* Proceed to copy the dynamic content */
+ amdgpu_device_vram_access(adev,
+ (uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+ *size = (uint64_t)data_size;
+
+ dev_dbg(adev->dev,
+ "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+ return 0;
+}
+
void amdgpu_virt_init(struct amdgpu_device *adev)
{
bool is_sriov = false;
@@ -1289,7 +1577,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
@@ -1304,7 +1592,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
@@ -1383,7 +1671,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
@@ -1515,7 +1803,7 @@ static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
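
The init-data header checksum verified in amdgpu_virt_init_critical_region() is a
byte-sum complement truncated to 8 bits. A self-contained userspace-style sketch of the
same arithmetic (the function name is illustrative):

#include <stdint.h>

/* Sum every byte in [start, end) and return the complement, truncated to
 * 8 bits, mirroring the driver helper's return type. */
static uint8_t crit_region_checksum(const uint8_t *start, const uint8_t *end)
{
	uint32_t sum = 0;

	while (start < end)
		sum += *start++;

	return (uint8_t)(0xffffffff - sum);
}
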
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d1172c8e58c4..14d864be5800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -54,6 +54,12 @@
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
+#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
+#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4
+
+#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -262,6 +268,11 @@ struct amdgpu_virt_ras {
DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+struct amdgpu_virt_region {
+ uint32_t offset;
+ uint32_t size_kb;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -289,6 +300,12 @@ struct amdgpu_virt {
bool ras_init_done;
uint32_t reg_access;
+ /* dynamic(v2) critical regions */
+ struct amdgpu_virt_region init_data_header;
+ struct amdgpu_virt_region crit_regn;
+ struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID];
+ bool is_dynamic_crit_regn_enabled;
+
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
@@ -424,6 +441,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size);
+
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
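
IS_SRIOV_CRIT_REGN_ENTRY_VALID() above is a plain bit test against the header's
valid_tables mask. A hedged sketch of how a consumer might pull one table entry out of
the v2 header; it assumes the struct and enum from amdgv_sriovmsg.h, and the
crit_region_get() name is made up:

static bool crit_region_get(const struct amd_sriov_msg_init_data_header *hdr,
			    enum amd_sriov_msg_table_id_enum id,
			    uint32_t *offset, uint32_t *size_kb)
{
	if (id >= AMD_SRIOV_MSG_MAX_TABLE_ID ||
	    !IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id))
		return false;

	switch (id) {
	case AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID:
		*offset = hdr->vbios_img_offset;
		*size_kb = hdr->vbios_img_size_in_kb;
		return true;
	case AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID:
		*offset = hdr->dataexchange_offset;
		*size_kb = hdr->dataexchange_size_in_kb;
		return true;
	default:
		return false;
	}
}
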
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c1a801203949..9309830821b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -779,7 +779,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
- struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -842,12 +841,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
}
if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
- r = amdgpu_fence_emit(ring, &fence, NULL, 0);
+ r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
- /* this is part of the job's context */
- af = container_of(fence, struct amdgpu_fence, base);
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ fence = &job->hw_vm_fence->base;
}
if (vm_flush_needed) {
@@ -1952,6 +1949,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
+ int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1972,6 +1970,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
return -ENOENT;
}
+ /* It is unlikely that a mapped userq has not been idled by the time
+ * userspace issues the GEM unmap IOCTL, unless userspace forces the
+ * unmap.
+ */
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
+ r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
+ if (unlikely(r == -EBUSY))
+ dev_warn_once(adev->dev,
+ "Attempt to unmap an active userq buffer\n");
+ }
+
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
mapping->bo_va = NULL;
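
The amdgpu_userq_gem_va_unmap_validate() call above amounts to waiting for the
reservation object's bookkeeping fences before the mapping is torn down. A minimal
sketch of that wait (illustrative wrapper, not the driver function):

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Wait interruptibly for all bookkeeping fences on a reservation object;
 * 0 once idle, -EBUSY if the wait was interrupted or otherwise cut short. */
static int wait_for_bookkeep_idle(struct dma_resv *resv)
{
	long t;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
		return 0;

	t = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
				  MAX_SCHEDULE_TIMEOUT);
	return t <= 0 ? -EBUSY : 0;
}
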
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 474bfe36c0c2..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
+
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
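
The reworked VPE idle handler follows the usual "gate only when fully idle, otherwise
re-arm" delayed-work shape. A minimal sketch under hypothetical demo_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_IDLE_TIMEOUT	msecs_to_jiffies(1000)	/* illustrative value */

struct demo_engine {
	struct delayed_work idle_work;
	bool (*busy)(struct demo_engine *e);
	void (*power_gate)(struct demo_engine *e);
};

static void demo_idle_work(struct work_struct *work)
{
	struct demo_engine *e =
		container_of(work, struct demo_engine, idle_work.work);

	/* Outstanding work (or any extra gating precondition) means we try
	 * again later instead of power gating right now. */
	if (e->busy(e)) {
		schedule_delayed_work(&e->idle_work, DEMO_IDLE_TIMEOUT);
		return;
	}

	e->power_gate(e);
}
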
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 3a79ed7d8031..1cee083fb6bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -23,26 +23,84 @@
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
-/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
-#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
-#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+#define AMD_SRIOV_MSG_SIZE_KB 1
+
/*
- * layout
+ * layout v1
* 0 64KB 65KB 66KB 68KB 132KB
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
-#define AMD_SRIOV_MSG_SIZE_KB 1
-#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
-#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+/*
+ * layout v2 (offsets are dynamically allocated and the offsets below are examples)
+ * 0 1KB 64KB 65KB 66KB 68KB 132KB
+ * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ...
+ *
+ * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously)
+ */
+
+/* v1 layout sizes */
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+
+/* v1 offsets */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1)
+
+enum amd_sriov_crit_region_version {
+ GPU_CRIT_REGION_V1 = 1,
+ GPU_CRIT_REGION_V2 = 2,
+};
+
+/* v2 layout offset enum (in order of allocation) */
+enum amd_sriov_msg_table_id_enum {
+ AMD_SRIOV_MSG_IPD_TABLE_ID = 0,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
+ AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID,
+ AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID,
+ AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID,
+ AMD_SRIOV_MSG_INITD_H_TABLE_ID,
+ AMD_SRIOV_MSG_MAX_TABLE_ID,
+};
+
+struct amd_sriov_msg_init_data_header {
+ char signature[4]; /* "INDA" */
+ uint32_t version;
+ uint32_t checksum;
+ uint32_t initdata_offset; /* 0 */
+ uint32_t initdata_size_in_kb; /* 5MB */
+ uint32_t valid_tables;
+ uint32_t vbios_img_offset;
+ uint32_t vbios_img_size_in_kb;
+ uint32_t dataexchange_offset;
+ uint32_t dataexchange_size_in_kb;
+ uint32_t ras_tele_info_offset;
+ uint32_t ras_tele_info_size_in_kb;
+ uint32_t ip_discovery_offset;
+ uint32_t ip_discovery_size_in_kb;
+ uint32_t bad_page_info_offset;
+ uint32_t bad_page_size_in_kb;
+ uint32_t reserved[8];
+};
/*
* PF2VF history log:
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
index 96616a865aac..ed1e25661706 100644
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 8841d7213de4..751732f3e883 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9951,6 +9951,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d61eb9f187c6..252517ce5d5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -2438,7 +2438,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
if (version_minor == 3)
gfx_v11_0_load_rlcp_rlcv_microcode(adev);
}
-
+
return 0;
}
@@ -3886,7 +3886,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
memcpy(fw, fw_data, fw_size);
-
+
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
@@ -7318,6 +7318,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 93fde0f9af87..35d5a7e99a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -5595,6 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
};
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 0856ff65288c..d3d0a4b0380c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6939,6 +6939,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dd19a97436db..f1a2efc2a8d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 77f9d5b9a556..e0b50c690f8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2152,7 +2152,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
+static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
+ bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -2186,8 +2187,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
}
-
- return 0;
}
static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
@@ -2220,7 +2219,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring;
- int i, r;
+ int i;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
@@ -2228,9 +2227,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
ring = &adev->gfx.compute_ring[i + xcc_id *
adev->gfx.num_compute_rings];
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- if (r)
- return r;
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
}
return amdgpu_gfx_enable_kcq(adev, xcc_id);
@@ -3605,11 +3602,8 @@ pipe_reset:
return r;
}
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- if (r) {
- dev_err(adev->dev, "fail to init kcq\n");
- return r;
- }
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
@@ -4798,6 +4792,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
};
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index f4a19357ccbc..cad2d19105c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -312,9 +312,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
}
- mutex_lock(&adev->mman.gtt_window_lock);
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
- mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 0d1dd587db5f..e716097dfde4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1843,6 +1843,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
adev->gmc.vram_vendor = vram_info & 0xF;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index 1cd9eaeef38f..b1ee9473d628 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -205,10 +205,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
struct mes_detect_and_reset_queue_input input;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
unsigned int hung_db_num = 0;
- int queue_id, r, i;
+ unsigned long queue_id;
u32 db_array[8];
+ int r, i;
if (db_array_size > 8) {
dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
@@ -227,16 +227,14 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
} else if (hung_db_num) {
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (queue->queue_type == queue_type) {
- for (i = 0; i < hung_db_num; i++) {
- if (queue->doorbell_index == db_array[i]) {
- queue->state = AMDGPU_USERQ_STATE_HUNG;
- atomic_inc(&adev->gpu_reset_counter);
- amdgpu_userq_fence_driver_force_completion(queue);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
- }
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
}
}
}
@@ -254,7 +252,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
- struct amdgpu_gfx_shadow_info shadow_info;
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -280,8 +277,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
- if (adev->gfx.funcs->get_gfx_shadow_info)
- adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@@ -298,8 +293,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
- if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
- max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+ r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ 2048);
+ if (r)
goto free_mqd;
userq_props->eop_gpu_addr = compute_mqd->eop_va;
@@ -311,6 +307,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
@@ -330,8 +334,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
- if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
- shadow_info.shadow_size))
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
goto free_mqd;
kfree(mqd_gfx_v11);
@@ -350,9 +359,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
r = -ENOMEM;
goto free_mqd;
}
-
- if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
- shadow_info.csa_size))
+ r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ 32);
+ if (r)
goto free_mqd;
userq_props->csa_addr = mqd_sdma_v11->csa_va;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index da575bb1377f..3a52754b5cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -369,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -379,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
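+ /* remove_queue_after_reset is only understood by newer MES firmware. */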
+ if (mes_rev >= 0x60)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 7f3512d9de07..744e95d3984a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -361,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -371,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
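+ /* remove_queue_after_reset is only understood by newer MES firmware. */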
+ if (mes_rev >= 0x5a)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e5282a5d05d9..cd5b2f07edb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -222,12 +222,20 @@ send_request:
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
- adev->virt.req_init_data_ver =
- RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
-
- /* assume V1 in case host doesn't set version number */
- if (adev->virt.req_init_data_ver < 1)
- adev->virt.req_init_data_ver = 1;
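+ /* DW1 carries the init-data version from the host; for V2 the
+ * critical-region offset and size follow in DW2 and DW3.
+ */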
+ switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
+ case GPU_CRIT_REGION_V2:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
+ adev->virt.init_data_header.offset =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
+ adev->virt.init_data_header.size_kb =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
+ break;
+ default:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
+ adev->virt.init_data_header.offset = -1;
+ adev->virt.init_data_header.size_kb = 0;
+ break;
+ }
}
}
@@ -285,7 +293,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+ return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
+ 0, GPU_CRIT_REGION_V2, 0);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 1c22bc11c1f8..bdfd2917e3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp;
-
- tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
- /* If it is VF or subrevision holds a non-zero value, that should be used */
- if (tmp || amdgpu_sriov_vf(adev))
- return tmp;
+ u32 rev_id;
- /* If discovery subrev is not updated, use register version */
- tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
- tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
- STRAP_ATI_REV_ID_DEV0_F0);
+ /*
+ * For a VF, fetch the sub-revision field from the IP-discovery table
+ * (zero if the table entry is not populated); otherwise read the strap register.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+ } else {
+ rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+ STRAP_ATI_REV_ID_DEV0_F0);
+ }
- return tmp;
+ return rev_id;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 6e7bc983fc0b..4fbe865ff279 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1897,6 +1897,8 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ int ret = 0;
+
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
@@ -1904,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
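+ /* Propagate queue-removal failures so callers can tell that CP scheduling did not stop cleanly. */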
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_kfd_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -1920,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
- return 0;
+ return ret;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 4ceb251312a6..d76fb61869c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -28,6 +28,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
/*
* GFX9 SQ Interrupts
@@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
kfd_signal_poison_consumed_event(dev, pasid);
- event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
+ if (amdgpu_uniras_enabled(dev->adev))
+ event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION);
+ else
+ event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
RAS_EVENT_LOG(dev->adev, event_id,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 59a5a3fea65d..46c84fc60af1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2c..2b7fd442d29c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ddfe30c13e9d..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
@@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work)
release_work);
struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
-
/*
* If GPU is in reset, user queues may still be running; wait for reset to complete.
*/
@@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ /*
+ * Dequeue and destroy user queues; it is not safe for the GPU to access
+ * system memory after the mmu release notifier callback returns because
+ * exit_mmap frees process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d72411c3379..ffb7b36e577c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1737,12 +1737,18 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner,
- &hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (likely(range))
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
+ else
+ r = -ENOMEM;
WRITE_ONCE(p->svms.faulting_task, NULL);
- if (r)
+ if (r) {
+ amdgpu_hmm_range_free(range);
+ range = NULL;
pr_debug("failed %d to get svm range pages\n", r);
+ }
} else {
r = -EFAULT;
}
@@ -1750,7 +1756,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
@@ -1758,13 +1764,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized
- * Overrride return value to TRY AGAIN only if prior returns
+ * Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ if (range)
+ amdgpu_hmm_range_free(range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 01c7a4877904..a63dfc95b602 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 32f7850abc61..bb0fe91a1601 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);
@@ -3392,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
+}
+
static int dm_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3576,6 +3637,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -3786,7 +3853,9 @@ void amdgpu_dm_update_connector_after_detect(
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- guard(mutex)(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3849,6 +3918,10 @@ void amdgpu_dm_update_connector_after_detect(
}
update_subconnector_property(aconnector);
+
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
}
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -5133,6 +5206,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -5152,6 +5226,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer the ABM property on non-OLED panels when the user didn't turn it off via module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -5407,6 +5488,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -7145,29 +7232,101 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll() - Poll a connector to see if it's connected to a display
+ * @aconnector: connector to poll
+ * @force: also poll a connection that was established via DAC load detection
+ *
+ * Used for connectors that don't support HPD (hotplug detection)
+ * to periodically check whether the connector is connected to a display.
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When the connection was determined using DAC load detection,
+ * do NOT poll the connector to detect a disconnect, because
+ * that would run DAC load detection again, which can cause
+ * visible glitches.
+ *
+ * Only allow polling such a connector again when forcing.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
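+ /* Do a lightweight connection-type probe first; full sink detection only
+ * runs when something is actually attached, to keep the poll path cheap.
+ */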
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only call full link detection when a sink isn't created yet,
+ * i.e. right when the display is plugged in; otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of a user-mode ioctl, which
+ * makes it a bad place for *any* MST-related activity.
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+ /* Only poll connectors that support analog signals, and only while they
+ * are disconnected or driving an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
@@ -7218,6 +7377,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -7260,6 +7433,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
@@ -7312,10 +7492,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
@@ -8155,7 +8341,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -8171,6 +8357,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -8222,7 +8412,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -8244,6 +8434,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -8254,23 +8462,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
(connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
@@ -8471,6 +8662,10 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || amdgpu_dm_connector->dc_sink->edid_caps.analog ||
+ !dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8494,6 +8689,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink->edid_caps.analog) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8562,6 +8766,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
@@ -8763,7 +8972,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -10519,7 +10728,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index db75e991ac7b..5a7aa903bd3c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -993,6 +993,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 1ec9d03ad747..38f9ea313dcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f263e1a4537e..cb4bb67289a4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD * sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fe100e4c9801..eb2c587b0b9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
@@ -130,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
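+ /* Bit 7 of the EDID video-input byte distinguishes digital from analog sinks. */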
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a1c722112c22..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
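+ /* Pause connector polling while display interrupts are suspended. */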
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
@@ -537,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
@@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int irq_type;
int i;
+ bool use_polling = false;
/* First, clear all hpd and hpdrx interrupts */
for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
@@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
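+ /* Any connector that cannot rely on HPD alone needs the poll worker. */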
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index dc943abd6dba..7277ed21552f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -36,7 +36,7 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
-DC_LIBS += dml2
+DC_LIBS += dml2_0
DC_LIBS += soc_and_ip_translator
endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 154fd2c18e88..4120d6c4c5e4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
ATOM_OBJECT *object);
static struct device_id device_type_from_device_id(uint16_t device_id);
static uint32_t signal_to_ss_id(enum as_signal_type signal);
-static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id);
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
ATOM_OBJECT *object);
@@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
info->pll_info.max_output_pxl_clk_pll_frequency =
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
/* Since there is no information on the SS, report conservative
@@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
info->external_clock_source_frequency_for_dp =
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+ info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
/* There should be only one entry in the SS info table for Memory Clock
*/
@@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
return bp->cmd_tbl.transmitter_control(bp, cntl);
}
+static enum bp_result bios_parser_select_crtc_source(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
static enum bp_result bios_parser_encoder_control(
struct dc_bios *dcb,
struct bp_encoder_control *cntl)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
+ if (cntl->engine_id == ENGINE_ID_DACA) {
+ if (!bp->cmd_tbl.dac1_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac1_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ } else if (cntl->engine_id == ENGINE_ID_DACB) {
+ if (!bp->cmd_tbl.dac2_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac2_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ }
+
if (!bp->cmd_tbl.dig_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}
+static enum bp_result bios_parser_dac_load_detection(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct dc_context *ctx = dcb->ctx;
+ struct bp_load_detection_parameters bp_params = {0};
+ enum bp_result bp_result;
+ uint32_t bios_0_scratch;
+ uint32_t device_id_mask = 0;
+
+ bp_params.engine_id = engine_id;
+ bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
+
+ if (engine_id != ENGINE_ID_DACA &&
+ engine_id != ENGINE_ID_DACB)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (!bp->cmd_tbl.dac_load_detection)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
+ device_id_mask = ATOM_S0_CRT1_MASK;
+ else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
+ device_id_mask = ATOM_S0_CRT2_MASK;
+ else
+ return BP_RESULT_UNSUPPORTED;
+
+ /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+ bios_0_scratch &= ~device_id_mask;
+ dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
+
+ bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
+
+ if (bp_result != BP_RESULT_OK)
+ return bp_result;
+
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+
+ if (bios_0_scratch & device_id_mask)
+ return BP_RESULT_OK;
+
+ return BP_RESULT_FAILURE;
+}
+
static enum bp_result bios_parser_adjust_pixel_clock(
struct dc_bios *dcb,
struct bp_adjust_pixel_clock_parameters *bp_params)
@@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- uint32_t mask = get_support_mask_for_device_id(id);
+ uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}
@@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
return clk_id_ss;
}
-static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id)
{
- enum dal_device_type device_type = device_id.device_type;
- uint32_t enum_id = device_id.enum_id;
-
switch (device_type) {
case DEVICE_TYPE_LCD:
switch (enum_id) {
@@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_device_id_supported = bios_parser_is_device_id_supported,
/* COMMANDS */
+ .select_crtc_source = bios_parser_select_crtc_source,
+
.encoder_control = bios_parser_encoder_control,
+ .dac_load_detection = bios_parser_dac_load_detection,
+
.transmitter_control = bios_parser_transmitter_control,
.enable_crtc = bios_parser_enable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 58e88778da7f..22457f417e65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
static void init_set_pixel_clock(struct bios_parser *bp);
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_load_detection(struct bios_parser *bp);
static void init_dac_output_control(struct bios_parser *bp);
static void init_set_crtc_timing(struct bios_parser *bp);
static void init_enable_crtc(struct bios_parser *bp);
@@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_pixel_clock(bp);
init_enable_spread_spectrum_on_ppll(bp);
init_adjust_display_pll(bp);
+ init_select_crtc_source(bp);
init_dac_encoder_control(bp);
+ init_dac_load_detection(bp);
init_dac_output_control(bp);
init_set_crtc_timing(bp);
init_enable_crtc(bp);
@@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3(
/*******************************************************************************
********************************************************************************
**
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 1:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PS_ALLOCATION params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ switch (bp_params->engine_id) {
+ case ENGINE_ID_DACA:
+ params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENGINE_ID_DACB:
+ params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static bool select_crtc_source_v2_encoder_id(
+ enum engine_id engine_id, uint8_t *out_encoder_id)
+{
+ uint8_t encoder_id = 0;
+
+ switch (engine_id) {
+ case ENGINE_ID_DIGA:
+ encoder_id = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGB:
+ encoder_id = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGC:
+ encoder_id = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGD:
+ encoder_id = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGE:
+ encoder_id = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGF:
+ encoder_id = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGG:
+ encoder_id = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACA:
+ encoder_id = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACB:
+ encoder_id = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_id = encoder_id;
+ return true;
+}
+
+static bool select_crtc_source_v2_encoder_mode(
+ enum signal_type signal_type, uint8_t *out_encoder_mode)
+{
+ uint8_t encoder_mode = 0;
+
+ switch (signal_type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ encoder_mode = ATOM_ENCODER_MODE_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ encoder_mode = ATOM_ENCODER_MODE_HDMI;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ encoder_mode = ATOM_ENCODER_MODE_LVDS;
+ break;
+ case SIGNAL_TYPE_RGB:
+ encoder_mode = ATOM_ENCODER_MODE_CRT;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ encoder_mode = ATOM_ENCODER_MODE_DP_MST;
+ break;
+ case SIGNAL_TYPE_EDP:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_mode = encoder_mode;
+ return true;
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ params.ucDstBpc = bp_params->bit_depth;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC ENCODER CONTROL
**
********************************************************************************
@@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1(
/*******************************************************************************
********************************************************************************
**
+ ** DAC LOAD DETECTION
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static void init_dac_load_detection(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
+ case 1:
+ case 2:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
+ break;
+ case 3:
+ default:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
+ break;
+ }
+}
+
+static void dac_load_detect_prepare_params(
+ struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
+ enum engine_id engine_id,
+ uint16_t device_id,
+ uint8_t misc)
+{
+ uint8_t dac_type = ATOM_DAC_A;
+
+ if (engine_id == ENGINE_ID_DACB)
+ dac_type = ATOM_DAC_B;
+
+ params->sDacload.usDeviceID = cpu_to_le16(device_id);
+ params->sDacload.ucDacType = dac_type;
+ params->sDacload.ucMisc = misc;
+}
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ 0);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ uint8_t misc = 0;
+
+ if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
+ bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
+ misc = DAC_LOAD_MISC_YPrPb;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ misc);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC OUTPUT CONTROL
**
********************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index ad533775e724..e89b1ba0048b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -52,6 +52,9 @@ struct cmd_tbl {
enum bp_result (*adjust_display_pll)(
struct bios_parser *bp,
struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
@@ -68,6 +71,9 @@ struct cmd_tbl {
enum bp_result (*dac2_output_control)(
struct bios_parser *bp,
bool enable);
+ enum bp_result (*dac_load_detection)(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
enum bp_result (*set_crtc_timing)(
struct bios_parser *bp,
struct bp_hw_crtc_timing_parameters *bp_params);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 9e63fa72101c..db687a13174d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -509,16 +509,16 @@ void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_b
regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index b315ed91e010..3a881451e9da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -40,7 +40,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-
+#include "reg_helper.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
@@ -48,9 +48,43 @@
#include "link_service.h"
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 8
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
+
+#define regCLK1_CLK0_CURRENT_CNT 0x0314
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x0315
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x0316
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x0317
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x0318
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0319
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000
@@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+}
+
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ struct dcn35_clk_internal internal = {0};
+
+ dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
return;
}
@@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn315_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn315_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+void dcn315_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
+ struct clk_log_info log_info = {0};
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
+}
+
void dcn315_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn315 *clk_mgr,
@@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
+ clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
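
For reference, a standalone C sketch of the address composition performed by the REG() macro added above (the IP_BASE/REG() pattern carried over from dcn35). The table values and offsets mirror the hunk; REG_READ() and the rest of the clk_mgr plumbing are kernel infrastructure and are not reproduced here.

/* Illustrative only: shows how REG() resolves a clock register address
 * from the CLK_BASE table added in dcn315_clk_mgr.c. */
#include <stdio.h>

#define MAX_INSTANCE 7
#define MAX_SEGMENT  8

struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; };
struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; };

/* Only instance 0, segment 0 matters for the REG() lookups below. */
static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800 } } } };

#define regCLK1_CLK0_CURRENT_CNT          0x0314
#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0

/* Same shape as the macro in the patch: segment base + register offset. */
#define REG(reg_name) \
    (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)

int main(void)
{
    /* 0x00016C00 + 0x0314 = 0x00016F14 */
    printf("CLK1_CLK0_CURRENT_CNT resolves to 0x%08X\n", REG(CLK1_CLK0_CURRENT_CNT));
    return 0;
}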
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
index ac36ddf5dd1a..642ae3d4a790 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
@@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn315_init_clocks(struct clk_mgr *clk_mgr);
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN315_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b11383fba35f..35d20a663d67 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
- if (actual_dtbclk) {
+ if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
@@ -633,16 +635,16 @@ static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs
regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
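
The bypass clamping changes above drop the "< 0" arm because the value has just been masked with 0x0007; assuming the *_bypass fields are unsigned (which is what removing the check implies), the lower bound can never be hit and only the upper bound needs enforcing. A minimal sketch of the resulting logic, with hypothetical inputs:

/* Illustrative only: the 3-bit bypass field is always in 0..7 after the
 * mask, so the old "< 0" comparison was dead code; values above 4 are
 * still reset to 0, matching the hunks above. */
#include <assert.h>
#include <stdint.h>

static uint32_t clamp_bypass(uint32_t bypass_cntl)
{
    uint32_t bypass = bypass_cntl & 0x0007; /* keep the bypass source field */

    if (bypass > 4)                         /* out-of-range source: fall back to 0 */
        bypass = 0;
    return bypass;
}

int main(void)
{
    assert(clamp_bypass(0xFFFF) == 0);  /* masks to 7, clamped to 0 */
    assert(clamp_bypass(0x0003) == 3);  /* in-range value passes through */
    return 0;
}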
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5f2d5638c819..b720e007c654 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -83,7 +83,7 @@
#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_internal_types.h"
#include "soc_and_ip_translator.h"
#endif
@@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
/* Private functions */
-static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+static inline void elevate_update_type(
+ struct surface_update_descriptor *descriptor,
+ enum surface_update_type new_type,
+ enum dc_lock_descriptor new_locks
+)
{
- if (new > *original)
- *original = new;
+ if (new_type > descriptor->update_type)
+ descriptor->update_type = new_type;
+
+ descriptor->lock_descriptor |= new_locks;
}
static void destroy_links(struct dc *dc)
@@ -493,9 +499,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
1,
*adjust);
stream->adjust.timing_adjust_pending = false;
+
+ if (dc->hwss.notify_cursor_offload_drr_update)
+ dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
+
return true;
}
}
+
return false;
}
@@ -1143,8 +1154,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
- if (dc->caps.max_optimizable_video_width == 0)
- dc->caps.max_optimizable_video_width = 5120;
+ if (dc->check_config.max_optimizable_video_width == 0)
+ dc->check_config.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -2158,8 +2169,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -2188,8 +2199,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);
+
if (result != DC_OK) {
/* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
@@ -2229,8 +2246,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2645,47 +2662,49 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
+static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE);
if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
@@ -2697,7 +2716,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* recalculate stutter period.
*/
update_flags->bits.dcc_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@@ -2706,30 +2725,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
+ const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
- if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(struct dc_tiling_info)) != 0) {
+ if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
-
- /* todo: below are HW dependent, we should add a hook to
- * DCE/N resource and validated there.
- */
- if (!dc->debug.skip_full_updated_if_possible) {
- /* swizzled mode requires RQ to be setup properly,
- * thus need to run DML to calculate RQ settings
- */
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
+
+ switch (tiling->gfxversion) {
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
+ if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
+ update_flags->bits.bandwidth_change = 1;
+ }
+ break;
+ case DcGfxAddr3:
+ if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
+ update_flags->bits.bandwidth_change = 1;
+ }
+ break;
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ case DcGfxVersionUnknown:
+ default:
+ break;
}
}
@@ -2737,14 +2767,17 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
return update_type;
}
-static enum surface_update_type get_scaling_info_update_type(
- const struct dc *dc,
+static struct surface_update_descriptor get_scaling_info_update_type(
+ const struct dc_check_config *check_config,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE);
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
@@ -2768,7 +2801,7 @@ static enum surface_update_type get_scaling_info_update_type(
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
@@ -2787,40 +2820,41 @@ static enum surface_update_type get_scaling_info_update_type(
if (update_flags->bits.clock_change
|| update_flags->bits.bandwidth_change
|| update_flags->bits.scaling_change)
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
if (update_flags->bits.position_change)
- return UPDATE_TYPE_MED;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
- return UPDATE_TYPE_FAST;
+ return update_type;
}
-static enum surface_update_type det_surface_update(const struct dc *dc,
- const struct dc_surface_update *u)
+static struct surface_update_descriptor det_surface_update(
+ const struct dc_check_config *check_config,
+ struct dc_surface_update *u)
{
- const struct dc_state *context = dc->current_state;
- enum surface_update_type type;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ if (u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
+ return overall_type;
}
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
+
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
- type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ inner_type = get_scaling_info_update_type(check_config, u);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
}
if (u->in_transfer_func)
@@ -2856,13 +2890,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ // TODO: Should be fast?
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (u->sdr_white_level_nits)
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
update_flags->bits.sdr_white_level_nits = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ // TODO: Should be fast?
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (u->cm2_params) {
@@ -2876,27 +2912,24 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.mcm_transfer_function_enable_change = 1;
}
if (update_flags->bits.in_transfer_func_change) {
- type = UPDATE_TYPE_MED;
- elevate_update_type(&overall_type, type);
+ // TODO: Fast?
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
}
if (update_flags->bits.lut_3d &&
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (update_flags->bits.mcm_transfer_function_enable_change) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
- if (dc->debug.enable_legacy_fast_update &&
+ if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
update_flags->bits.coeff_reduction_change)) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
return overall_type;
}
@@ -2924,40 +2957,34 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
}
}
-static enum surface_update_type check_update_surfaces_for_stream(
- struct dc *dc,
+static struct surface_update_descriptor check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-
- if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
- overall_type = UPDATE_TYPE_FULL;
-
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- overall_type = UPDATE_TYPE_FULL;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (stream_update && stream_update->pending_test_pattern) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
if (stream_update && stream_update->hw_cursor_req) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
}
/* some stream updates require passive update */
if (stream_update) {
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
@@ -2993,7 +3020,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
su_flags->bits.out_csc = 1;
if (su_flags->raw != 0)
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
if (stream_update->output_csc_transform)
su_flags->bits.out_csc = 1;
@@ -3001,15 +3028,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
/* Output transfer function changes do not require bandwidth recalculation,
* so don't trigger a full update
*/
- if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
}
- for (i = 0 ; i < surface_count; i++) {
- enum surface_update_type type =
- det_surface_update(dc, &updates[i]);
+ for (int i = 0 ; i < surface_count; i++) {
+ struct surface_update_descriptor inner_type =
+ det_surface_update(check_config, &updates[i]);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return overall_type;
@@ -3020,44 +3047,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type type;
-
if (stream_update)
stream_update->stream->update_flags.raw = 0;
- for (i = 0; i < surface_count; i++)
+ for (size_t i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
- type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL) {
- if (stream_update) {
- uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
- stream_update->stream->update_flags.raw = 0xFFFFFFFF;
- stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
- }
- for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.raw = 0xFFFFFFFF;
- }
-
- if (type == UPDATE_TYPE_FAST) {
- // If there's an available clock comparator, we use that.
- if (dc->clk_mgr->funcs->are_clock_states_equal) {
- if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
- dc->optimized_required = true;
- // Else we fallback to mem compare.
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
- dc->optimized_required = true;
- }
- }
-
- return type;
+ return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
static struct dc_stream_status *stream_get_status(
@@ -3426,6 +3427,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3472,7 +3480,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
+
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
@@ -3504,6 +3515,16 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4149,7 +4170,7 @@ static void commit_planes_for_stream(struct dc *dc,
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4176,16 +4197,16 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
@@ -4228,9 +4249,8 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
-
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
return;
}
@@ -4419,7 +4439,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4467,13 +4487,13 @@ static void commit_planes_for_stream(struct dc *dc,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4489,6 +4509,8 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->plane_state->skip_manual_trigger)
continue;
+ if (dc->hwss.program_cursor_offload_now)
+ dc->hwss.program_cursor_offload_now(dc, pipe_ctx);
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
@@ -4994,7 +5016,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
}
}
-static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
@@ -5035,18 +5057,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
return false;
}
-static bool full_update_required(struct dc *dc,
- struct dc_surface_update *srf_updates,
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
-
- int i;
- struct dc_stream_status *stream_status;
const struct dc_state *context = dc->current_state;
+ if (srf_updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
- for (i = 0; i < surface_count; i++) {
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
@@ -5062,8 +5110,7 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm2_params &&
(srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
- !is_surface_in_context(context, srf_updates[i].surface)))
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
return true;
}
@@ -5099,26 +5146,16 @@ static bool full_update_required(struct dc *dc,
stream_update->hw_cursor_req))
return true;
- if (stream) {
- stream_status = dc_stream_get_status(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
return false;
}
-static bool fast_update_only(struct dc *dc,
- struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
@@ -5181,7 +5218,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5224,7 +5261,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update)
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5350,7 +5387,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
else
@@ -6349,7 +6387,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc)
return false;
}
-bool dc_can_clear_cursor_limit(struct dc *dc)
+bool dc_can_clear_cursor_limit(const struct dc *dc)
{
uint32_t i;
@@ -6378,3 +6416,8 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
if (dc->hwss.get_underflow_debug_data)
dc->hwss.get_underflow_debug_data(dc, tg, out_data);
}
+
+void dc_log_preos_dmcub_info(const struct dc *dc)
+{
+ dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
+}
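
The dc.c changes above replace the bare enum surface_update_type bookkeeping with struct surface_update_descriptor, so each check accumulates both the highest update type and the set of locks it needs. A standalone sketch of that accumulation (type and field names taken from the hunks; the individual LOCK_DESCRIPTOR_* bit values are illustrative, the diff only shows that they are OR-ed together):

/* Illustrative only: elevate_update_type() never downgrades the update
 * type and accumulates lock requirements as a bitmask. */
#include <assert.h>

enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

enum dc_lock_descriptor {
    LOCK_DESCRIPTOR_NONE   = 0,
    LOCK_DESCRIPTOR_PLANE  = 1 << 0,  /* assumed bit assignment */
    LOCK_DESCRIPTOR_STREAM = 1 << 1,  /* assumed bit assignment */
    LOCK_DESCRIPTOR_STATE  = 1 << 2,  /* assumed bit assignment */
};

struct surface_update_descriptor {
    enum surface_update_type update_type;
    enum dc_lock_descriptor lock_descriptor;
};

static void elevate_update_type(struct surface_update_descriptor *d,
                                enum surface_update_type new_type,
                                enum dc_lock_descriptor new_locks)
{
    if (new_type > d->update_type)
        d->update_type = new_type;
    d->lock_descriptor |= new_locks;
}

int main(void)
{
    struct surface_update_descriptor d = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

    elevate_update_type(&d, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
    elevate_update_type(&d, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE);

    assert(d.update_type == UPDATE_TYPE_MED);                                     /* never lowered */
    assert(d.lock_descriptor == (LOCK_DESCRIPTOR_STATE | LOCK_DESCRIPTOR_PLANE)); /* bits accumulate */
    return 0;
}

In the patch this pairing is folded per surface: det_surface_update() returns a descriptor, check_update_surfaces_for_stream() merges them into one overall descriptor, and update_planes_and_stream_state() reads the update_type field from the returned value.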
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index d82b1cb467f4..f95cb0cf4b8a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -32,6 +32,12 @@
#include "resource.h"
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
+#include "opp.h"
+#include "dsc.h"
+#include "dchubbub.h"
+#include "dccg.h"
+#include "abm.h"
+#include "dcn10/dcn10_hubbub.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
#define MAX_NUM_MCACHE 8
@@ -755,11 +761,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
if (dc->hwss.pipe_control_lock) {
@@ -784,7 +790,7 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_mpc_pipe) {
if (current_mpc_pipe->plane_state) {
if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp;
block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
(*num_steps)++;
@@ -894,11 +900,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -911,6 +917,13 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
+ if (dc->hwss.program_cursor_offload_now) {
+ block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc;
+ block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW;
+ (*num_steps)++;
+ }
+
block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
(*num_steps)++;
@@ -942,8 +955,9 @@ void hwss_execute_sequence(struct dc *dc,
params->pipe_control_lock_params.lock);
break;
case HUBP_SET_FLIP_CONTROL_GSL:
- dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
- params->set_flip_control_gsl_params.flip_immediate);
+ params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ params->set_flip_control_gsl_params.hubp,
+ params->set_flip_control_gsl_params.flip_immediate);
break;
case HUBP_PROGRAM_TRIPLEBUFFER:
dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
@@ -1001,8 +1015,298 @@ void hwss_execute_sequence(struct dc *dc,
params->wait_for_dcc_meta_propagation_params.dc,
params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
break;
- case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
- dc->hwss.fams2_global_control_lock_fast(params);
+ case DMUB_HW_CONTROL_LOCK_FAST:
+ dc->hwss.dmub_hw_control_lock_fast(params);
+ break;
+ case HUBP_PROGRAM_SURFACE_CONFIG:
+ hwss_program_surface_config(params);
+ break;
+ case HUBP_PROGRAM_MCACHE_ID:
+ hwss_program_mcache_id_and_split_coordinate(params);
+ break;
+ case PROGRAM_CURSOR_UPDATE_NOW:
+ dc->hwss.program_cursor_offload_now(
+ params->program_cursor_update_now_params.dc,
+ params->program_cursor_update_now_params.pipe_ctx);
+ break;
+ case HUBP_WAIT_PIPE_READ_START:
+ params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start(
+ params->hubp_wait_pipe_read_start_params.hubp);
+ break;
+ case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM:
+ dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx);
+ break;
+ case HWS_UPDATE_PHANTOM_VP_POSITION:
+ dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc,
+ params->update_phantom_vp_position_params.context,
+ params->update_phantom_vp_position_params.pipe_ctx);
+ break;
+ case OPTC_SET_ODM_COMBINE:
+ hwss_set_odm_combine(params);
+ break;
+ case OPTC_SET_ODM_BYPASS:
+ hwss_set_odm_bypass(params);
+ break;
+ case OPP_PIPE_CLOCK_CONTROL:
+ hwss_opp_pipe_clock_control(params);
+ break;
+ case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL:
+ hwss_opp_program_left_edge_extra_pixel(params);
+ break;
+ case DCCG_SET_DTO_DSCCLK:
+ hwss_dccg_set_dto_dscclk(params);
+ break;
+ case DSC_SET_CONFIG:
+ hwss_dsc_set_config(params);
+ break;
+ case DSC_ENABLE:
+ hwss_dsc_enable(params);
+ break;
+ case TG_SET_DSC_CONFIG:
+ hwss_tg_set_dsc_config(params);
+ break;
+ case DSC_DISCONNECT:
+ hwss_dsc_disconnect(params);
+ break;
+ case DSC_READ_STATE:
+ hwss_dsc_read_state(params);
+ break;
+ case DSC_CALCULATE_AND_SET_CONFIG:
+ hwss_dsc_calculate_and_set_config(params);
+ break;
+ case DSC_ENABLE_WITH_OPP:
+ hwss_dsc_enable_with_opp(params);
+ break;
+ case TG_PROGRAM_GLOBAL_SYNC:
+ hwss_tg_program_global_sync(params);
+ break;
+ case TG_WAIT_FOR_STATE:
+ hwss_tg_wait_for_state(params);
+ break;
+ case TG_SET_VTG_PARAMS:
+ hwss_tg_set_vtg_params(params);
+ break;
+ case TG_SETUP_VERTICAL_INTERRUPT2:
+ hwss_tg_setup_vertical_interrupt2(params);
+ break;
+ case DPP_SET_HDR_MULTIPLIER:
+ hwss_dpp_set_hdr_multiplier(params);
+ break;
+ case HUBP_PROGRAM_DET_SIZE:
+ hwss_program_det_size(params);
+ break;
+ case HUBP_PROGRAM_DET_SEGMENTS:
+ hwss_program_det_segments(params);
+ break;
+ case OPP_SET_DYN_EXPANSION:
+ hwss_opp_set_dyn_expansion(params);
+ break;
+ case OPP_PROGRAM_FMT:
+ hwss_opp_program_fmt(params);
+ break;
+ case OPP_PROGRAM_BIT_DEPTH_REDUCTION:
+ hwss_opp_program_bit_depth_reduction(params);
+ break;
+ case OPP_SET_DISP_PATTERN_GENERATOR:
+ hwss_opp_set_disp_pattern_generator(params);
+ break;
+ case ABM_SET_PIPE:
+ hwss_set_abm_pipe(params);
+ break;
+ case ABM_SET_LEVEL:
+ hwss_set_abm_level(params);
+ break;
+ case ABM_SET_IMMEDIATE_DISABLE:
+ hwss_set_abm_immediate_disable(params);
+ break;
+ case MPC_REMOVE_MPCC:
+ hwss_mpc_remove_mpcc(params);
+ break;
+ case OPP_SET_MPCC_DISCONNECT_PENDING:
+ hwss_opp_set_mpcc_disconnect_pending(params);
+ break;
+ case DC_SET_OPTIMIZED_REQUIRED:
+ hwss_dc_set_optimized_required(params);
+ break;
+ case HUBP_DISCONNECT:
+ hwss_hubp_disconnect(params);
+ break;
+ case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL:
+ hwss_hubbub_force_pstate_change_control(params);
+ break;
+ case TG_ENABLE_CRTC:
+ hwss_tg_enable_crtc(params);
+ break;
+ case TG_SET_GSL:
+ hwss_tg_set_gsl(params);
+ break;
+ case TG_SET_GSL_SOURCE_SELECT:
+ hwss_tg_set_gsl_source_select(params);
+ break;
+ case HUBP_WAIT_FLIP_PENDING:
+ hwss_hubp_wait_flip_pending(params);
+ break;
+ case TG_WAIT_DOUBLE_BUFFER_PENDING:
+ hwss_tg_wait_double_buffer_pending(params);
+ break;
+ case UPDATE_FORCE_PSTATE:
+ hwss_update_force_pstate(params);
+ break;
+ case HUBBUB_APPLY_DEDCN21_147_WA:
+ hwss_hubbub_apply_dedcn21_147_wa(params);
+ break;
+ case HUBBUB_ALLOW_SELF_REFRESH_CONTROL:
+ hwss_hubbub_allow_self_refresh_control(params);
+ break;
+ case TG_GET_FRAME_COUNT:
+ hwss_tg_get_frame_count(params);
+ break;
+ case MPC_SET_DWB_MUX:
+ hwss_mpc_set_dwb_mux(params);
+ break;
+ case MPC_DISABLE_DWB_MUX:
+ hwss_mpc_disable_dwb_mux(params);
+ break;
+ case MCIF_WB_CONFIG_BUF:
+ hwss_mcif_wb_config_buf(params);
+ break;
+ case MCIF_WB_CONFIG_ARB:
+ hwss_mcif_wb_config_arb(params);
+ break;
+ case MCIF_WB_ENABLE:
+ hwss_mcif_wb_enable(params);
+ break;
+ case MCIF_WB_DISABLE:
+ hwss_mcif_wb_disable(params);
+ break;
+ case DWBC_ENABLE:
+ hwss_dwbc_enable(params);
+ break;
+ case DWBC_DISABLE:
+ hwss_dwbc_disable(params);
+ break;
+ case DWBC_UPDATE:
+ hwss_dwbc_update(params);
+ break;
+ case HUBP_UPDATE_MALL_SEL:
+ hwss_hubp_update_mall_sel(params);
+ break;
+ case HUBP_PREPARE_SUBVP_BUFFERING:
+ hwss_hubp_prepare_subvp_buffering(params);
+ break;
+ case HUBP_SET_BLANK_EN:
+ hwss_hubp_set_blank_en(params);
+ break;
+ case HUBP_DISABLE_CONTROL:
+ hwss_hubp_disable_control(params);
+ break;
+ case HUBBUB_SOFT_RESET:
+ hwss_hubbub_soft_reset(params);
+ break;
+ case HUBP_CLK_CNTL:
+ hwss_hubp_clk_cntl(params);
+ break;
+ case HUBP_INIT:
+ hwss_hubp_init(params);
+ break;
+ case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS:
+ hwss_hubp_set_vm_system_aperture_settings(params);
+ break;
+ case HUBP_SET_FLIP_INT:
+ hwss_hubp_set_flip_int(params);
+ break;
+ case DPP_DPPCLK_CONTROL:
+ hwss_dpp_dppclk_control(params);
+ break;
+ case DISABLE_PHANTOM_CRTC:
+ hwss_disable_phantom_crtc(params);
+ break;
+ case DSC_PG_STATUS:
+ hwss_dsc_pg_status(params);
+ break;
+ case DSC_WAIT_DISCONNECT_PENDING_CLEAR:
+ hwss_dsc_wait_disconnect_pending_clear(params);
+ break;
+ case DSC_DISABLE:
+ hwss_dsc_disable(params);
+ break;
+ case DCCG_SET_REF_DSCCLK:
+ hwss_dccg_set_ref_dscclk(params);
+ break;
+ case DPP_PG_CONTROL:
+ hwss_dpp_pg_control(params);
+ break;
+ case HUBP_PG_CONTROL:
+ hwss_hubp_pg_control(params);
+ break;
+ case HUBP_RESET:
+ hwss_hubp_reset(params);
+ break;
+ case DPP_RESET:
+ hwss_dpp_reset(params);
+ break;
+ case DPP_ROOT_CLOCK_CONTROL:
+ hwss_dpp_root_clock_control(params);
+ break;
+ case DC_IP_REQUEST_CNTL:
+ hwss_dc_ip_request_cntl(params);
+ break;
+ case DCCG_UPDATE_DPP_DTO:
+ hwss_dccg_update_dpp_dto(params);
+ break;
+ case HUBP_VTG_SEL:
+ hwss_hubp_vtg_sel(params);
+ break;
+ case HUBP_SETUP2:
+ hwss_hubp_setup2(params);
+ break;
+ case HUBP_SETUP:
+ hwss_hubp_setup(params);
+ break;
+ case HUBP_SET_UNBOUNDED_REQUESTING:
+ hwss_hubp_set_unbounded_requesting(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT2:
+ hwss_hubp_setup_interdependent2(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT:
+ hwss_hubp_setup_interdependent(params);
+ break;
+ case DPP_SET_CURSOR_MATRIX:
+ hwss_dpp_set_cursor_matrix(params);
+ break;
+ case MPC_UPDATE_BLENDING:
+ hwss_mpc_update_blending(params);
+ break;
+ case MPC_ASSERT_IDLE_MPCC:
+ hwss_mpc_assert_idle_mpcc(params);
+ break;
+ case MPC_INSERT_PLANE:
+ hwss_mpc_insert_plane(params);
+ break;
+ case DPP_SET_SCALER:
+ hwss_dpp_set_scaler(params);
+ break;
+ case HUBP_MEM_PROGRAM_VIEWPORT:
+ hwss_hubp_mem_program_viewport(params);
+ break;
+ case SET_CURSOR_ATTRIBUTE:
+ hwss_set_cursor_attribute(params);
+ break;
+ case SET_CURSOR_POSITION:
+ hwss_set_cursor_position(params);
+ break;
+ case SET_CURSOR_SDR_WHITE_LEVEL:
+ hwss_set_cursor_sdr_white_level(params);
+ break;
+ case PROGRAM_OUTPUT_CSC:
+ hwss_program_output_csc(params);
+ break;
+ case HUBP_SET_BLANK:
+ hwss_hubp_set_blank(params);
+ break;
+ case PHANTOM_HUBP_POST_ENABLE:
+ hwss_phantom_hubp_post_enable(params);
break;
default:
ASSERT(false);
@@ -1011,6 +1315,338 @@ void hwss_execute_sequence(struct dc *dc,
}
}
+/**
+ * Helper function to add OPTC pipe control lock to block sequence
+ */
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool lock)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP set flip control GSL to block sequence
+ */
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool flip_immediate)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP program triplebuffer to block sequence
+ */
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enableTripleBuffer)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP update plane address to block sequence
+ */
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DPP set input transfer function to block sequence
+ */
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DPP program gamut remap to block sequence
+ */
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DPP program bias and scale to block sequence
+ */
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add OPTC program manual trigger to block sequence
+ */
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DPP set output transfer function to block sequence
+ */
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add MPC update visual confirm to block sequence
+ */
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add MPC power on MPC mem PWR to block sequence
+ */
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on;
+ seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add MPC set output CSC to block sequence
+ */
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add MPC set OCSC default to block sequence
+ */
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space colorspace,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DMUB send DMCUB command to block sequence
+ */
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DMUB SubVP save surface address to block sequence
+ */
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct dc_plane_address *addr,
+ uint8_t subvp_index)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP wait for DCC meta propagation to block sequence
+ */
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP wait pipe read start to block sequence
+ */
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HWS apply update flags for phantom to block sequence
+ */
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HWS update phantom VP position to block sequence
+ */
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add OPTC set ODM combine to block sequence
+ */
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg;
+ memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES);
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add OPTC set ODM bypass to block sequence
+ */
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS;
+ (*seq_state->num_steps)++;
+ }
+}
+
void hwss_send_dmcub_cmd(union block_sequence_params *params)
{
struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
@@ -1020,6 +1656,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
+/**
+ * Helper function to add TG program global sync to block sequence
+ */
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines;
+ seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add TG wait for state to block sequence
+ */
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ enum crtc_state state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add TG set VTG params to block sequence
+ */
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dc_crtc_timing *dc_crtc_timing,
+ bool program_fp2)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2;
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add TG setup vertical interrupt2 to block sequence
+ */
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line;
+ seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add DPP set HDR multiplier to block sequence
+ */
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP program DET size to block sequence
+ */
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_buffer_size_kb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE;
+ (*seq_state->num_steps)++;
+ }
+}
+
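+/**
+ * Helper function to add HUBP program mcache ID and split coordinate to block sequence
+ */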
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID;
+ (*seq_state->num_steps)++;
+ }
+}
+
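+/**
+ * Helper function to add HUBBUB force pstate change control to block sequence
+ */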
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool enable,
+ bool wait)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait;
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP program DET segments to block sequence
+ */
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_size)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add OPP set dynamic expansion to block sequence
+ */
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_space,
+ enum dc_color_depth color_depth,
+ enum signal_type signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal;
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add OPP program FMT to block sequence
+ */
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping;
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT;
+ (*seq_state->num_steps)++;
+ }
+}
+
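+/**
+ * Helper function to add OPP program left edge extra pixel to block sequence
+ */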
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add ABM set pipe to block sequence
+ */
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add ABM set level to block sequence
+ */
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm,
+ uint32_t abm_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add TG enable CRTC to block sequence
+ */
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add HUBP wait flip pending to block sequence
+ */
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * Helper function to add TG wait double buffer pending to block sequence
+ */
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
void hwss_program_manual_trigger(union block_sequence_params *params)
{
struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
@@ -1046,12 +1952,6 @@ void hwss_setup_dpp(union block_sequence_params *params)
plane_state->color_space,
NULL);
}
-
- if (dpp && dpp->funcs->set_cursor_matrix) {
- dpp->funcs->set_cursor_matrix(dpp,
- plane_state->color_space,
- plane_state->cursor_csc_color_matrix);
- }
}
void hwss_program_bias_and_scale(union block_sequence_params *params)
@@ -1062,9 +1962,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
 //TODO: for CNVC set scale and bias registers if necessary
- if (dpp->funcs->dpp_program_bias_and_scale) {
+ if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
- }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -1114,6 +2013,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
+void hwss_program_surface_config(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_surface_config_params.hubp;
+ enum surface_pixel_format format = params->program_surface_config_params.format;
+ struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info;
+ struct plane_size size = params->program_surface_config_params.plane_size;
+ enum dc_rotation_angle rotation = params->program_surface_config_params.rotation;
+ struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc;
+ bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror;
+ int compat_level = params->program_surface_config_params.compat_level;
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ format,
+ tiling_info,
+ &size,
+ rotation,
+ dcc,
+ horizontal_mirror,
+ compat_level);
+
+ hubp->power_gated = false;
+}
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs;
+
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs);
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -1188,6 +2120,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1264,3 +2197,1848 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}
+
+void hwss_set_odm_combine(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_combine_params.tg;
+ int *opp_inst = params->set_odm_combine_params.opp_inst;
+ int opp_head_count = params->set_odm_combine_params.opp_head_count;
+ int odm_slice_width = params->set_odm_combine_params.odm_slice_width;
+ int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width;
+
+ if (tg && tg->funcs->set_odm_combine)
+ tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count,
+ odm_slice_width, last_odm_slice_width);
+}
+
+void hwss_set_odm_bypass(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_bypass_params.tg;
+ const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing;
+
+ if (tg && tg->funcs->set_odm_bypass)
+ tg->funcs->set_odm_bypass(tg, timing);
+}
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp;
+ bool enable = params->opp_pipe_clock_control_params.enable;
+
+ if (opp && opp->funcs->opp_pipe_clock_control)
+ opp->funcs->opp_pipe_clock_control(opp, enable);
+}
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp;
+ enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding;
+ bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master;
+
+ if (opp && opp->funcs->opp_program_left_edge_extra_pixel)
+ opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master);
+}
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg;
+ int inst = params->dccg_set_dto_dscclk_params.inst;
+ int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h;
+
+ if (dccg && dccg->funcs->set_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h);
+}
+
+void hwss_dsc_set_config(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc;
+ struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg;
+
+ if (dsc && dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg);
+}
+
+void hwss_dsc_enable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_enable_params.dsc;
+ int opp_inst = params->dsc_enable_params.opp_inst;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, opp_inst);
+}
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_dsc_config_params.tg;
+ enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED;
+ uint32_t bytes_per_pixel = 0;
+ uint32_t slice_width = 0;
+
+ if (params->tg_set_dsc_config_params.enable) {
+ struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg;
+
+ if (dsc_optc_cfg) {
+ bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel;
+ slice_width = dsc_optc_cfg->slice_width;
+ optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ?
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+ }
+ }
+
+ if (tg && tg->funcs->set_dsc_config)
+ tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width);
+}
+
+void hwss_dsc_disconnect(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc;
+
+ if (dsc && dsc->funcs->dsc_disconnect)
+ dsc->funcs->dsc_disconnect(dsc);
+}
+
+void hwss_dsc_read_state(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc;
+ struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state;
+
+ if (dsc && dsc->funcs->dsc_read_state)
+ dsc->funcs->dsc_read_state(dsc, dsc_state);
+}
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx;
+ struct pipe_ctx *top_pipe = pipe_ctx;
+ bool enable = params->dsc_calculate_and_set_config_params.enable;
+ int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt;
+
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ if (!dsc || !enable)
+ return;
+
+ /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */
+ struct dsc_config dsc_cfg;
+
+ while (top_pipe->prev_odm_pipe)
+ top_pipe = top_pipe->prev_odm_pipe;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = top_pipe->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+ /* Set DSC configuration */
+ if (dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+ &params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+ int vready_offset = params->tg_program_global_sync_params.vready_offset;
+ unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+ unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+ if (tg->funcs->program_global_sync) {
+ tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+ vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+ }
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+ enum crtc_state state = params->tg_wait_for_state_params.state;
+
+ if (tg->funcs->wait_for_state)
+ tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+ struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing;
+ bool program_fp2 = params->tg_set_vtg_params_params.program_fp2;
+
+ if (tg->funcs->set_vtg_params)
+ tg->funcs->set_vtg_params(tg, timing, program_fp2);
+}
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg;
+ int start_line = params->tg_setup_vertical_interrupt2_params.start_line;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp;
+ uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult;
+
+ if (dpp->funcs->dpp_set_hdr_multiplier)
+ dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult);
+}
+
+void hwss_program_det_size(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_size_params.hubbub;
+ unsigned int hubp_inst = params->program_det_size_params.hubp_inst;
+ unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb;
+
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb);
+}
+
+void hwss_program_det_segments(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_segments_params.hubbub;
+ unsigned int hubp_inst = params->program_det_segments_params.hubp_inst;
+ unsigned int det_size = params->program_det_segments_params.det_size;
+
+ if (hubbub->funcs->program_det_segments)
+ hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size);
+}
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp;
+ enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth;
+ enum signal_type signal = params->opp_set_dyn_expansion_params.signal;
+
+ if (opp->funcs->opp_set_dyn_expansion)
+ opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal);
+}
+
+void hwss_opp_program_fmt(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_fmt_params.opp;
+ struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping;
+
+ if (opp->funcs->opp_program_fmt)
+ opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping);
+}
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp;
+ bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params;
+ struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx;
+ struct bit_depth_reduction_params bit_depth_params;
+
+ if (use_default_params)
+ memset(&bit_depth_params, 0, sizeof(bit_depth_params));
+ else
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params);
+
+ if (opp->funcs->opp_program_bit_depth_reduction)
+ opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params);
+}
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp;
+ enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern;
+ enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth;
+ struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ?
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+ int width = params->opp_set_disp_pattern_generator_params.width;
+ int height = params->opp_set_disp_pattern_generator_params.height;
+ int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+ opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
+}
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_pipe_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_pipe)
+ dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+ struct abm *abm = params->set_abm_level_params.abm;
+ unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+ if (abm->funcs->set_abm_level)
+ abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_immediate_disable_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_abm_immediate_disable)
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+ struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+ struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+ int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+ bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+ opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_set_optimized_required_params.dc;
+ bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+ dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+ bool enable = params->hubbub_force_pstate_change_control_params.enable;
+ bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+ if (hubbub->funcs->force_pstate_change_control) {
+ hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+ /* Add delay when enabling pstate change control */
+ if (enable)
+ udelay(500);
+ }
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_params.tg;
+ struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+ if (tg->funcs->set_gsl)
+ tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+ int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+ uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+ if (tg->funcs->set_gsl_source_select)
+ tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp;
+ unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+}
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg;
+ unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_update_force_pstate(union block_sequence_params *params)
+{
+ struct dc *dc = params->update_force_pstate_params.dc;
+ struct dc_state *context = params->update_force_pstate_params.context;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+}
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub;
+
+ hubbub->funcs->apply_DEDCN21_147_wa(hubbub);
+}
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub;
+ bool allow = params->hubbub_allow_self_refresh_control_params.allow;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, allow);
+
+ if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied)
+ *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true;
+}
+
+void hwss_tg_get_frame_count(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_get_frame_count_params.tg;
+ unsigned int *frame_count = params->tg_get_frame_count_params.frame_count;
+
+ *frame_count = tg->funcs->get_frame_count(tg);
+}
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc;
+ int dwb_id = params->mpc_set_dwb_mux_params.dwb_id;
+ int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id;
+
+ if (mpc->funcs->set_dwb_mux)
+ mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id);
+}
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc;
+ unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id;
+
+ if (mpc->funcs->disable_dwb_mux)
+ mpc->funcs->disable_dwb_mux(mpc, dwb_id);
+}
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb;
+ struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params;
+ unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height;
+
+ if (mcif_wb->funcs->config_mcif_buf)
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height);
+}
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb;
+ struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params;
+
+ if (mcif_wb->funcs->config_mcif_arb)
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params);
+}
+
+void hwss_mcif_wb_enable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb;
+
+ if (mcif_wb->funcs->enable_mcif)
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+}
+
+void hwss_mcif_wb_disable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb;
+
+ if (mcif_wb->funcs->disable_mcif)
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+void hwss_dwbc_enable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_enable_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params;
+
+ if (dwb->funcs->enable)
+ dwb->funcs->enable(dwb, dwb_params);
+}
+
+void hwss_dwbc_disable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_disable_params.dwb;
+
+ if (dwb->funcs->disable)
+ dwb->funcs->disable(dwb);
+}
+
+void hwss_dwbc_update(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_update_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params;
+
+ if (dwb->funcs->update)
+ dwb->funcs->update(dwb, dwb_params);
+}
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_update_mall_sel_params.hubp;
+ uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel;
+ bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor;
+
+ if (hubp && hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor);
+}
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp;
+ bool enable = params->hubp_prepare_subvp_buffering_params.enable;
+
+ if (hubp && hubp->funcs->hubp_prepare_subvp_buffering)
+ hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable);
+}
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_en_params.hubp;
+ bool enable = params->hubp_set_blank_en_params.enable;
+
+ if (hubp && hubp->funcs->set_hubp_blank_en)
+ hubp->funcs->set_hubp_blank_en(hubp, enable);
+}
+
+void hwss_hubp_disable_control(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disable_control_params.hubp;
+ bool disable = params->hubp_disable_control_params.disable;
+
+ if (hubp && hubp->funcs->hubp_disable_control)
+ hubp->funcs->hubp_disable_control(hubp, disable);
+}
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub;
+ bool reset = params->hubbub_soft_reset_params.reset;
+
+ if (hubbub)
+ params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset);
+}
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_clk_cntl_params.hubp;
+ bool enable = params->hubp_clk_cntl_params.enable;
+
+ if (hubp && hubp->funcs->hubp_clk_cntl) {
+ hubp->funcs->hubp_clk_cntl(hubp, enable);
+ hubp->power_gated = !enable;
+ }
+}
+
+void hwss_hubp_init(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_init_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_init)
+ hubp->funcs->hubp_init(hubp);
+}
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp;
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default;
+ apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high;
+ apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low;
+
+ if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings)
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+}
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_flip_int_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_set_flip_int)
+ hubp->funcs->hubp_set_flip_int(hubp);
+}
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_dppclk_control_params.dpp;
+ bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div;
+ bool enable = params->dpp_dppclk_control_params.enable;
+
+ if (dpp && dpp->funcs->dpp_dppclk_control)
+ dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable);
+}
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->disable_phantom_crtc_params.tg;
+
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+}
+
+void hwss_dsc_pg_status(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dsc_pg_status_params.hws;
+ int dsc_inst = params->dsc_pg_status_params.dsc_inst;
+
+ if (hws && hws->funcs.dsc_pg_status)
+ params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst);
+}
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc;
+
+ if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated)
+ return;
+ if (!(*params->dsc_wait_disconnect_pending_clear_params.is_ungated))
+ return;
+
+ if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear)
+ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+}
+
+void hwss_dsc_disable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disable_params.dsc;
+
+ if (!params->dsc_disable_params.is_ungated)
+ return;
+ if (!(*params->dsc_disable_params.is_ungated))
+ return;
+
+ if (dsc && dsc->funcs->dsc_disable)
+ dsc->funcs->dsc_disable(dsc);
+}
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg;
+ int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst;
+
+ if (!params->dccg_set_ref_dscclk_params.is_ungated)
+ return;
+ if (!(*params->dccg_set_ref_dscclk_params.is_ungated))
+ return;
+
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc_inst);
+}
+
+void hwss_dpp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_pg_control_params.hws;
+ unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst;
+ bool power_on = params->dpp_pg_control_params.power_on;
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp_inst, power_on);
+}
+
+void hwss_hubp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->hubp_pg_control_params.hws;
+ unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst;
+ bool power_on = params->hubp_pg_control_params.power_on;
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp_inst, power_on);
+}
+
+void hwss_hubp_reset(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_reset_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_reset)
+ hubp->funcs->hubp_reset(hubp);
+}
+
+void hwss_dpp_reset(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_reset_params.dpp;
+
+ if (dpp && dpp->funcs->dpp_reset)
+ dpp->funcs->dpp_reset(dpp);
+}
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws;
+ unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst;
+ bool clock_on = params->dpp_root_clock_control_params.clock_on;
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on);
+}
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_ip_request_cntl_params.dc;
+ bool enable = params->dc_ip_request_cntl_params.enable;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.dc_ip_request_cntl)
+ hws->funcs.dc_ip_request_cntl(dc, enable);
+}
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg;
+ int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst;
+ int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz;
+
+ if (dccg && dccg->funcs->update_dpp_dto)
+ dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz);
+}
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_vtg_sel_params.hubp;
+ uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst;
+
+ if (hubp && hubp->funcs->hubp_vtg_sel)
+ hubp->funcs->hubp_vtg_sel(hubp, otg_inst);
+}
+
+void hwss_hubp_setup2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs;
+ union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync;
+ struct dc_crtc_timing *timing = params->hubp_setup2_params.timing;
+
+ if (hubp && hubp->funcs->hubp_setup2)
+ hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing);
+}
+
+void hwss_hubp_setup(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest;
+
+ if (hubp && hubp->funcs->hubp_setup)
+ hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest);
+}
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp;
+ bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req;
+
+ if (hubp && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, unbounded_req);
+}
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent2)
+ hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent)
+ hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+ enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+ if (dpp && dpp->funcs->set_cursor_matrix)
+ dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+ struct dc *dc = params->mpc_update_mpcc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.update_mpcc)
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_update_blending_params.mpc;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+ int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+ if (mpc && mpc->funcs->update_blending)
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc;
+ int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+ if (mpc && mpc->funcs->wait_for_idle)
+ mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+ struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+ struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+ int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+ int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+ if (mpc && mpc->funcs->insert_plane)
+ mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+ dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+ const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+ if (dpp && dpp->funcs->dpp_set_scaler)
+ dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+ const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+ const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+ if (hubp && hubp->funcs->mem_program_viewport)
+ hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_attribute_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_attribute)
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_position_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_position)
+ dc->hwss.set_cursor_position(pipe_ctx);
+}
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_sdr_white_level_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+}
+
+void hwss_program_output_csc(union block_sequence_params *params)
+{
+ struct dc *dc = params->program_output_csc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx;
+ enum dc_color_space colorspace = params->program_output_csc_params.colorspace;
+ uint16_t *matrix = params->program_output_csc_params.matrix;
+ int opp_id = params->program_output_csc_params.opp_id;
+
+ if (dc && dc->hwss.program_output_csc)
+ dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);
+}
+
+void hwss_hubp_set_blank(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_params.hubp;
+ bool blank = params->hubp_set_blank_params.blank;
+
+ if (hubp && hubp->funcs->set_blank)
+ hubp->funcs->set_blank(hubp, blank);
+}
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp;
+
+ if (hubp && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
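+/**
+ * Helper function to add DCCG set DTO DSCCLK to block sequence
+ */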
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h;
+ (*seq_state->num_steps)++;
+ }
+}
+
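+/**
+ * Helper function to add DSC calculate and set config to block sequence
+ */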
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt;
+ (*seq_state->num_steps)++;
+ }
+}
+
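+/**
+ * Helper function to add MPC remove MPCC to block sequence
+ */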
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove;
+ (*seq_state->num_steps)++;
+ }
+}
+
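+/**
+ * Helper function to add OPP set MPCC disconnect pending to block sequence
+ */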
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending;
+ (*seq_state->num_steps)++;
+ }
+}
+
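+/**
+ * Helper function to add HUBP disconnect to block sequence
+ */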
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
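+/**
+ * Helper function to add DSC enable with OPP to block sequence
+ */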
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP;
+ seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
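+/**
+ * Helper function to add TG set DSC config to block sequence
+ */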
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
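+/**
+ * Helper function to add DSC disconnect to block sequence
+ */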
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc;
+ (*seq_state->num_steps)++;
+ }
+}
+
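+/**
+ * Helper function to add DC set optimized required to block sequence
+ */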
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required;
+ (*seq_state->num_steps)++;
+ }
+}
+
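+/**
+ * Helper function to add ABM set immediate disable to block sequence
+ */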
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * hwss_add_mpc_update_blending() - Add an MPC update blending step to the block sequence
+ */
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * hwss_add_mpc_insert_plane() - Add an MPC insert plane step to the block sequence
+ */
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * hwss_add_mpc_assert_idle_mpcc() - Add an MPC assert idle MPCC step to the block sequence
+ */
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/**
+ * hwss_add_hubp_set_blank() - Add a HUBP set blank step to the block sequence
+ */
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC;
+ seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_INIT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP;
+ seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count;
+ (*seq_state->num_steps)++;
+ }
+}
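All of the hwss_add_*() helpers added above follow the same pattern: bounds-check the running step counter against MAX_HWSS_BLOCK_SEQUENCE_SIZE, record the step's function enum plus its parameters into the next slot, and advance the counter. The sketch below is illustrative only: the block_sequence_state initialization is inferred from the helper bodies, and hwss_execute_sequence() is assumed to be the existing DC block-sequence executor.

static void example_blank_and_gate_hubp(struct dc *dc, struct hubp *hubp)
{
	struct block_sequence steps[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
	unsigned int num_steps = 0;
	struct block_sequence_state seq_state = {
		.steps = steps,		/* assumed layout: pointer to the step array */
		.num_steps = &num_steps,	/* assumed layout: pointer to the running count */
	};

	/* Each helper appends one bounds-checked step. */
	hwss_add_hubp_set_blank(&seq_state, hubp, true);
	hwss_add_hubp_clk_cntl(&seq_state, hubp, false);

	/* Replay the recorded steps in order. */
	hwss_execute_sequence(dc, steps, num_steps);
}

Keeping the counter behind a pointer presumably lets helpers called from different layers append to one shared sequence without threading an index through every call.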
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index a180f68f711c..deb23d20bca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ enum engine_id encs_assigned[MAX_LINK_ENCODERS];
int i;
- for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ for (i = 0; i < MAX_LINK_ENCODERS; i++)
encs_assigned[i] = ENGINE_ID_UNKNOWN;
/* Add assigned encoders to list. */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index bc5dedf5f60c..dc0c4065a92c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -95,7 +95,7 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
@@ -446,6 +446,14 @@ bool resource_construct(
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
+
+ for (i = 0; i < caps->num_analog_stream_encoder; i++) {
+ pool->stream_enc[caps->num_stream_encoder + i] =
+ create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
+ if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
+ DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
+ pool->stream_enc_count++;
+ }
}
pool->hpo_dp_stream_enc_count = 0;
@@ -2690,17 +2698,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link)
}
static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
- const struct dc_link *link, const struct resource_pool *pool)
+ const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
{
- int i;
+ int i, j = -1;
+ int stream_enc_inst = -1;
int enc_count = pool->dig_link_enc_count;
- /* for dpia, check preferred encoder first and then the next one */
- for (i = 0; i < enc_count; i++)
- if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
- break;
+ /* Find stream encoder instance for the stream */
+ if (stream) {
+ for (i = 0; i < pool->pipe_count; i++) {
+ if ((res_ctx->pipe_ctx[i].stream == stream) &&
+ (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
+ break;
+ }
+ }
+ }
+
+ /* Selection priority: dpia preferred engine > stream encoder instance > first free encoder */
+ for (i = 0; i < enc_count; i++) {
+ if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
+ if (j == -1)
+ j = i;
+
+ if (link->dpia_preferred_eng_id == i) {
+ j = i;
+ break;
+ }
- return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
+ if (stream_enc_inst == i) {
+ j = stream_enc_inst;
+ }
+ }
+ }
+ return j;
}
static inline void acquire_dio_link_enc(
@@ -2781,7 +2812,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
retain_dio_link_enc(res_ctx, enc_index);
} else {
if (stream->link->is_dig_mapping_flexible)
- enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
else {
int link_index = 0;
@@ -2791,7 +2822,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
* one into the acquiring link.
*/
if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
- int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
if (new_enc_index >= 0)
swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
@@ -5201,7 +5232,7 @@ struct link_encoder *get_temp_dio_link_enc(
enc_index = link->eng_id;
if (enc_index < 0)
- enc_index = find_free_dio_link_enc(res_ctx, link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
if (enc_index >= 0)
link_enc = pool->link_encoders[enc_index];
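The reworked find_free_dio_link_enc() above picks a free DIO link encoder in this priority order: the link's DPIA-preferred engine, then the encoder matching the stream encoder instance, then the first free encoder. A standalone model of that walk with hypothetical values (illustrative only, not the driver code):

static int pick_free_enc(const int *ref_cnts, int enc_count,
		int dpia_preferred, int stream_enc_inst)
{
	int i, j = -1;

	for (i = 0; i < enc_count; i++) {
		if (ref_cnts[i] != 0)
			continue;	/* encoder already in use */
		if (j == -1)
			j = i;		/* remember the first free encoder as a fallback */
		if (dpia_preferred == i) {
			j = i;		/* a free DPIA-preferred engine wins outright */
			break;
		}
		if (stream_enc_inst == i)
			j = i;		/* otherwise prefer the stream encoder's instance */
	}
	return j;			/* -1 when every encoder is in use */
}

/*
 * With ref counts {1, 0, 0, 0} (encoder 0 busy):
 *   pick_free_enc(cnts, 4, 2, 1)  -> 2 (preferred engine is free)
 *   pick_free_enc(cnts, 4, 0, 1)  -> 1 (preferred busy, stream encoder match)
 *   pick_free_enc(cnts, 4, 0, -1) -> 1 (fall back to the first free encoder)
 */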
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index c61300a7cb1c..2de8ef4a58ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -35,8 +35,8 @@
#include "link_enc_cfg.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_wrapper.h"
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_wrapper.h"
+#include "dml2_0/dml2_internal_types.h"
#endif
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9ac2d41f8fca..6d309c320253 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
return dc_state_get_stream_status(dc->current_state, stream);
}
+const struct dc_stream_status *dc_stream_get_status_const(
+ const struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+
+ return dc_state_get_stream_status(dc->current_state, stream);
+}
+
void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream)
@@ -231,6 +239,7 @@ void program_cursor_attributes(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -245,9 +254,14 @@ void program_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+ }
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -255,12 +269,18 @@ void program_cursor_attributes(
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
}
if (pipe_to_program) {
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ }
}
}
@@ -366,6 +386,7 @@ void program_cursor_position(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -384,16 +405,27 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
+
if (dc->ctx->dmub_srv)
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ }
}
bool dc_stream_set_cursor_position(
@@ -855,9 +887,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
DC_LOG_DC(
- "\tdispname: %s signal: %x\n",
+ "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n",
+ stream->signal,
stream->sink->edid_caps.display_name,
- stream->signal);
+ stream->sink->edid_caps.manufacturer_id,
+ stream->sink->edid_caps.product_id);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 98f0b6b3c213..010d9315b96b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
@@ -54,8 +54,16 @@ struct abm_save_restore;
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
+struct dcn_hubbub_reg_state;
+struct dcn_hubp_reg_state;
+struct dcn_dpp_reg_state;
+struct dcn_mpc_reg_state;
+struct dcn_opp_reg_state;
+struct dcn_dsc_reg_state;
+struct dcn_optc_reg_state;
+struct dcn_dccg_reg_state;
-#define DC_VER "3.2.351"
+#define DC_VER "3.2.356"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -278,6 +286,15 @@ struct dc_scl_caps {
bool sharpener_support;
};
+struct dc_check_config {
+ /**
+ * max video plane width that can be safely assumed to be always
+ * supported by single DPP pipe.
+ */
+ unsigned int max_optimizable_video_width;
+ bool enable_legacy_fast_update;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -293,11 +310,6 @@ struct dc_caps {
unsigned int max_cursor_size;
unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
- /*
- * max video plane width that can be safely assumed to be always
- * supported by single DPP pipe.
- */
- unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
@@ -455,6 +467,19 @@ enum surface_update_type {
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
+enum dc_lock_descriptor {
+ LOCK_DESCRIPTOR_NONE = 0x0,
+ LOCK_DESCRIPTOR_STATE = 0x1,
+ LOCK_DESCRIPTOR_LINK = 0x2,
+ LOCK_DESCRIPTOR_STREAM = 0x4,
+ LOCK_DESCRIPTOR_PLANE = 0x8,
+};
+
+struct surface_update_descriptor {
+ enum surface_update_type update_type;
+ enum dc_lock_descriptor lock_descriptor;
+};
+
/* Forward declaration*/
struct dc;
struct dc_plane_state;
@@ -530,6 +555,7 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ bool enable_cursor_offload;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
};
@@ -849,8 +875,7 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
- uint32_t enable_bw_allocation_mode:1; /* bit 7 */
- uint32_t reserved:24;
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -1120,7 +1145,6 @@ struct dc_debug_options {
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
- bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
@@ -1152,7 +1176,6 @@ struct dc_debug_options {
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
- bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
@@ -1164,6 +1187,7 @@ struct dc_debug_options {
unsigned int auxless_alpm_lfps_t1t2_us;
short auxless_alpm_lfps_t1t2_offset_us;
bool disable_stutter_for_wm_program;
+ bool enable_block_sequence_programming;
};
@@ -1702,6 +1726,7 @@ struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
+ struct dc_check_config check_config;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
@@ -1830,20 +1855,14 @@ struct dc_surface_update {
};
struct dc_underflow_debug_data {
- uint32_t otg_inst;
- uint32_t otg_underflow;
- uint32_t h_position;
- uint32_t v_position;
- uint32_t otg_frame_count;
- struct dc_underflow_per_hubp_debug_data {
- uint32_t hubp_underflow;
- uint32_t hubp_in_blank;
- uint32_t hubp_readline;
- uint32_t det_config_error;
- } hubps[MAX_PIPES];
- uint32_t curr_det_sizes[MAX_PIPES];
- uint32_t target_det_sizes[MAX_PIPES];
- uint32_t compbuf_config_error;
+ struct dcn_hubbub_reg_state *hubbub_reg_state;
+ struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
+ struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
+ struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
+ struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
+ struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
+ struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
+ struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
};
/*
@@ -2721,6 +2740,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+void dc_log_preos_dmcub_info(const struct dc *dc);
+
/* DSC Interfaces */
#include "dc_dsc.h"
@@ -2736,7 +2757,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
bool dc_is_cursor_limit_pending(struct dc *dc);
-bool dc_can_clear_cursor_limit(struct dc *dc);
+bool dc_can_clear_cursor_limit(const struct dc *dc);
/**
* dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 5fa5e2b63fb7..40d7a7d83c40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -91,9 +91,17 @@ struct dc_vbios_funcs {
struct device_id id);
/* COMMANDS */
+ enum bp_result (*select_crtc_source)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*encoder_control)(
struct dc_bios *bios,
struct bp_encoder_control *cntl);
+ enum bp_result (*dac_load_detection)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
@@ -165,6 +173,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 53a088ebddef..fffbf1983143 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
- int pipe_idx = 0;
struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
@@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
if (should_manage_pstate) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
- pipe_idx++;
}
}
@@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
bool enable)
{
uint8_t cmd_pipe_index = 0;
- uint32_t i, pipe_idx;
+ uint32_t i;
uint8_t subvp_count = 0;
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
@@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (enable) {
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
@@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
}
- pipe_idx++;
}
if (subvp_count == 2) {
update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
@@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
+void dc_dmub_srv_cursor_offload_init(struct dc *dc)
+{
+ struct dmub_rb_cmd_cursor_offload_init *init;
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc->config.enable_cursor_offload)
+ return;
+
+ if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_v1)
+ return;
+
+ if (!dc_dmub_srv->dmub->shared_state)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ init = &cmd.cursor_offload_init;
+ init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
+ init->header.payload_bytes = sizeof(init->init_data);
+ init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
+ init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ dc_dmub_srv->cursor_offload_enabled = true;
+}
+
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable)
+{
+ struct pipe_ctx const *pipe_ctx;
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!stream)
+ return;
+
+ pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type =
+ enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+
+ cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
+ cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
+ stream->timing.pix_clk_100hz / 10));
+
+ cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
+ stream->adjust.v_total_max :
+ stream->timing.v_total;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
+ enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!pipe || !pipe->stream || !pipe->stream_res.tg)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+ cntl->data.otg_inst = pipe->stream_res.tg->inst;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx;
@@ -2231,6 +2322,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin
return result;
}
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
+{
+ return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
+}
+
void dc_dmub_srv_release_hw(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
@@ -2248,3 +2344,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc)
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ dmub = dc_dmub_srv->dmub;
+
+ if (dmub_srv_get_preos_info(dmub)) {
+ DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
+ DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
+ DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
+ DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
+ DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
+ DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
+ DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
+ DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
+ }
+}
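In dc_dmub_srv_control_cursor_offload() above, line_time_in_ns works out to 1 + h_total * 1,000,000 / (pix_clk_100hz / 10): dividing the horizontal total by the pixel clock in kHz yields nanoseconds per scanline, padded by one nanosecond. A worked example with a hypothetical 1080p60 CEA timing (h_total = 2200, pixel clock 148.5 MHz, so pix_clk_100hz = 1485000), using div64_u64() from <linux/math64.h> as the patch does:

	uint64_t h_total = 2200;
	uint64_t pix_clk_khz = 1485000 / 10;	/* pix_clk_100hz / 10 = 148500 kHz */
	uint32_t line_time_ns = 1u +
		(uint32_t)div64_u64(h_total * 1000000ull, pix_clk_khz);
	/* 2200000000 / 148500 = 14814, +1 = 14815 ns, roughly a 14.8 us scanline */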
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7ef93444ef3c..72e0a41f39f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,6 +56,7 @@ struct dc_dmub_srv {
union dmub_shared_state_ips_driver_signals driver_signals;
bool idle_allowed;
bool needs_idle_wake;
+ bool cursor_offload_enabled;
};
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
@@ -326,9 +327,51 @@ bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t
enum ips_residency_mode ips_mode);
/**
+ * dc_dmub_srv_cursor_offload_init() - Initializes DMUB cursor offload support if available.
+ *
+ * @dc: pointer to DC object
+ */
+void dc_dmub_srv_cursor_offload_init(struct dc *dc);
+
+/**
+ * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream.
+ *
+ * @dc: pointer to DC object
+ * @context: the DC context to reference for pipe allocations
+ * @stream: the stream to control
+ * @enable: true to enable cursor offload, false to disable
+ */
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable);
+
+/**
+ * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe.
+ *
+ * @dc: pointer to DC object
+ * @pipe: top-most pipe for a stream.
+ */
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe);
+
+/**
+ * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload is enabled.
+ *
+ * @dc: pointer to DC object
+ *
+ * Return: true if cursor offload is enabled, false otherwise
+ */
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
+
+/**
* dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
* @dc - pointer to DC object
*/
void dc_dmub_srv_release_hw(const struct dc *dc);
+
+/**
+ * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info.
+ *
+ * @dc_dmub_srv: pointer to the DC DMUB service object
+ */
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
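A hypothetical caller sequence for the new cursor-offload interface, sketched only from the declarations above; real hwss call sites may differ.

#include "dc_dmub_srv.h"

static void example_cursor_offload_flow(struct dc *dc, struct dc_state *context,
					const struct dc_stream_state *stream,
					struct pipe_ctx *top_pipe)
{
	dc_dmub_srv_cursor_offload_init(dc);	/* once, during DC init */

	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;				/* no DMUB or offload disabled */

	dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
	dc_dmub_srv_program_cursor_now(dc, top_pipe);
}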
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 55704d4457ef..37d1a79e8241 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->prefer_easf = false;
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
+ else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3)
+ spl_in->override_easf = true;
/* Translate adaptive sharpening preference */
unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 76cf9fdedab0..321cfe92d799 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -473,12 +473,11 @@ void dc_enable_stereo(
/* Triggers multi-stream synchronization. */
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
+ struct dc_stream_update *stream_update);
/**
* Create a new default stream for the requested sink
@@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
+struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
+const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b5aa03a3e39c..ea6b71c43d2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -185,6 +185,10 @@ struct dc_panel_patch {
unsigned int wait_after_dpcd_poweroff_ms;
};
+/**
+ * struct dc_edid_caps - Capabilities read from EDID.
+ * @analog: Whether the monitor is analog. Used by DVI-I handling.
+ */
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -212,6 +216,8 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
bool rr_capable;
+ bool scdc_present;
+ bool analog;
struct dc_panel_patch panel_patch;
};
@@ -347,7 +353,8 @@ enum dc_connection_type {
dc_connection_none,
dc_connection_single,
dc_connection_mst_branch,
- dc_connection_sst_branch
+ dc_connection_sst_branch,
+ dc_connection_dac_load
};
struct dc_csc_adjustments {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
index 5999b2da3a01..33d8bd91cb01 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
@@ -148,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index a9b88f5e0c04..8bdffd9ff31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -425,7 +425,69 @@ struct dccg_mask {
uint32_t SYMCLKD_CLOCK_ENABLE; \
uint32_t SYMCLKE_CLOCK_ENABLE; \
uint32_t DP_DTO_MODULO[MAX_PIPES]; \
- uint32_t DP_DTO_PHASE[MAX_PIPES]
+ uint32_t DP_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
+ uint32_t DCCG_AUDIO_DTO0_MODULE; \
+ uint32_t DCCG_AUDIO_DTO0_PHASE; \
+ uint32_t DCCG_AUDIO_DTO1_MODULE; \
+ uint32_t DCCG_AUDIO_DTO1_PHASE; \
+ uint32_t DCCG_CAC_STATUS; \
+ uint32_t DCCG_CAC_STATUS2; \
+ uint32_t DCCG_DISP_CNTL_REG; \
+ uint32_t DCCG_DS_CNTL; \
+ uint32_t DCCG_DS_DTO_INCR; \
+ uint32_t DCCG_DS_DTO_MODULO; \
+ uint32_t DCCG_DS_HW_CAL_INTERVAL; \
+ uint32_t DCCG_GTC_CNTL; \
+ uint32_t DCCG_GTC_CURRENT; \
+ uint32_t DCCG_GTC_DTO_INCR; \
+ uint32_t DCCG_GTC_DTO_MODULO; \
+ uint32_t DCCG_PERFMON_CNTL; \
+ uint32_t DCCG_PERFMON_CNTL2; \
+ uint32_t DCCG_SOFT_RESET; \
+ uint32_t DCCG_TEST_CLK_SEL; \
+ uint32_t DCCG_VSYNC_CNT_CTRL; \
+ uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
+ uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
+ uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DP_DTO_DBUF_EN; \
+ uint32_t DPIACLK_540M_DTO_MODULO; \
+ uint32_t DPIACLK_540M_DTO_PHASE; \
+ uint32_t DPIACLK_810M_DTO_MODULO; \
+ uint32_t DPIACLK_810M_DTO_PHASE; \
+ uint32_t DPIACLK_DTO_CNTL; \
+ uint32_t DPIASYMCLK_CNTL; \
+ uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CNTL; \
+ uint32_t DTBCLK_DTO_DBUF_EN; \
+ uint32_t FORCE_SYMCLK_DISABLE; \
+ uint32_t HDMICHARCLK0_CLOCK_CNTL; \
+ uint32_t MICROSECOND_TIME_BASE_DIV; \
+ uint32_t MILLISECOND_TIME_BASE_DIV; \
+ uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG0_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PIXEL_RATE_CNTL; \
+ uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
+ uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_PSP_CNTL
+
struct dccg_registers {
DCCG_REG_VARIABLE_LIST;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 8664f0c4c9b7..97df04b7e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
+ dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
+ dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
+ dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
+ dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
+ dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
+ dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
+ dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
+ dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
+ dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
+ dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
+ dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
+ dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
+ dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
+ dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
+ dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
+ dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
+ dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
+ dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
+ dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
+ dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
+ dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
+ dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
+ dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
+ dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
+ dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
+ dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
+ dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
+ dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
+ dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
+ dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
+ dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
+ dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
+ dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
+ dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
+ dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
+ dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
+ dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
+ dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
+ dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
+ dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
+ dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
+ dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
+ dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
+ dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
+ dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
+ dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
+ dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
+ dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
+ dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
+ dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
+ dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
+ dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
+ dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
+ dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
+ dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
+ dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
+ dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
+ dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
+ dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
+ dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
+ dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
+ dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
+ dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
+ dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
+ dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
+ dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
+ dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
+ dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
+ dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
+ dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
+ dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
+ dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
+ dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
+ dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
+ dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
+ dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
+ dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
+ dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
+ dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
+ dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
+ dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
+ dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
+ dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
+}
+
static const struct dccg_funcs dccg31_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
@@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index cd261051dc2c..bf659920d4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
+
#endif //__DCN31_DCCG_H__
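A hypothetical consumer of the new hook, assuming only the dccg_read_reg_state callback and the dcn_dccg_reg_state fields shown above; dump_dccg_reg_state() itself is not part of this patch.

static void dump_dccg_reg_state(struct dccg *dccg)
{
	struct dcn_dccg_reg_state reg_state = { 0 };

	if (!dccg->funcs->dccg_read_reg_state)
		return;

	dccg->funcs->dccg_read_reg_state(dccg, &reg_state);
	/* e.g. log reg_state.dccg_soft_reset, reg_state.dp_dto0_phase, ... */
}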
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
index 8f6edd8e9beb..ef3db6beba25 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
@@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
- .set_dtbclk_p_src = dccg314_set_dtbclk_p_src
+ .set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg314_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index 60ea1d248deb..a609635f35db 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -74,8 +74,7 @@
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
+ SR(DTBCLK_P_CNTL)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index de6d62401362..bd2f528137b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -1114,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
if (dispclk_rdivider_value != 0)
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
+static void dccg35_wait_for_dentist_change_done(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
uint32_t dpp_inst, uint32_t enable)
@@ -1174,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
} else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
- /*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ udelay(10);
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1300,6 +1310,8 @@ static void dccg35_set_pixel_rate_div(
BREAK_TO_DEBUGGER();
return;
}
+ if (otg_inst < 4)
+ dccg35_wait_for_dentist_change_done(dccg);
}
static void dccg35_set_dtbclk_p_src(
@@ -1411,7 +1423,7 @@ static void dccg35_set_dtbclk_dto(
__func__, params->otg_inst, params->pixclk_khz,
params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
- } else {
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -1664,7 +1676,7 @@ static void dccg35_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
@@ -1682,9 +1694,12 @@ static void dccg35_dpp_root_clock_control(
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
/*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ // wait for clock to fully ramp
+ udelay(10);
+
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
@@ -2438,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 51f98c5c51c4..7b9c36456cd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -41,8 +41,9 @@
SR(SYMCLKA_CLOCK_ENABLE),\
SR(SYMCLKB_CLOCK_ENABLE),\
SR(SYMCLKC_CLOCK_ENABLE),\
- SR(SYMCLKD_CLOCK_ENABLE),\
- SR(SYMCLKE_CLOCK_ENABLE)
+ SR(SYMCLKD_CLOCK_ENABLE), \
+ SR(SYMCLKE_CLOCK_ENABLE),\
+ SR(SYMCLK_PSP_CNTL)
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -231,6 +232,14 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 0b8ed9b94d3c..663a18ee5162 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
.enable_symclk_se = dccg401_enable_symclk_se,
.disable_symclk_se = dccg401_disable_symclk_se,
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg401_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index a6006776333d..2dcf394edf22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a8e79104b684..5f8fba45d98d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 0c50fe266c8a..87dbb8d7ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -302,6 +302,10 @@ static void setup_panel_mode(
if (ctx->dc->caps.psp_setup_panel_mode)
return;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
return true;
}
+static bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* When the VBIOS doesn't specify any limits, use 400 MHz.
+ * The value comes from amdgpu_atombios_get_clock_info.
+ */
+ uint32_t max_pixel_clock_khz = 400000;
+
+ if (enc110->base.ctx->dc_bios->fw_info_valid &&
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
+ max_pixel_clock_khz =
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
+ }
+
+ if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
+ return false;
+
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
+ return false;
+
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ return true;
+}
+
void dce110_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
is_valid = dce110_link_encoder_validate_dp_output(
enc110, &stream->timing);
break;
+ case SIGNAL_TYPE_RGB:
+ is_valid = dce110_link_encoder_validate_rgb_output(
+ enc110, &stream->timing);
+ break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
@@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
@@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
+ case SIGNAL_TYPE_RGB:
+ break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
@@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
+ switch (enc->analog_engine) {
+ case ENGINE_ID_DACA:
+ REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
+ break;
+ case ENGINE_ID_DACB:
+ /* DACB doesn't seem to be present on DCE6+,
+ * although there are references to it in the register file.
+ */
+ DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
+ break;
+ default:
+ break;
+ }
+
+ /* The code below only applies to connectors that support digital signals. */
+ if (enc->transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
@@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
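A quick worked instance of the RGB (DAC) limits added above; the helper and the timing numbers are illustrative only, not from the patch.

static bool example_rgb_pix_clk_ok(uint32_t pix_clk_100hz, uint32_t max_pixel_clock_khz)
{
	/* pix_clk_100hz is in 100 Hz units, so compare against kHz * 10.
	 * e.g. 1920x1200@60 at ~154 MHz: 1540000 <= 400000 * 10, so it
	 * passes as long as the timing is also 8-bit RGB.
	 */
	return pix_clk_100hz <= max_pixel_clock_khz * 10;
}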
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 261c70e01e33..c58b69bc319b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -101,18 +101,21 @@
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id)
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SR(DAC_ENABLE)
#endif
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- LE_COMMON_REG_LIST_BASE(id)
+ LE_COMMON_REG_LIST_BASE(id), \
+ SR(DAC_ENABLE)
#define LE_DCE100_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SR(DCI_MEM_PWR_STATUS)
+ SR(DCI_MEM_PWR_STATUS), \
+ SR(DAC_ENABLE)
#define LE_DCE110_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
@@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+
+ /* DAC registers */
+ uint32_t DAC_ENABLE;
};
struct dce110_link_encoder {
@@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
-bool dce110_link_encoder_validate_rgb_output(
- const struct dce110_link_encoder *enc110,
- const struct dc_crtc_timing *crtc_timing);
-
bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1130d7619b26..f8996ee2856b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct(
enc110->se_shift = se_shift;
enc110->se_mask = se_mask;
}
+
+static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {0};
+
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id)
+{
+ enc110->base.funcs = &dce110_an_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index cc5020a8e1e1..068de1392121 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
const struct dce_stream_encoder_shift *se_shift,
const struct dce_stream_encoder_mask *se_mask);
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id);
void dce110_se_audio_mute_control(
struct stream_encoder *enc, bool mute);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d37ecfdde4f1..39f5fa73c43e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -61,27 +61,30 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
-bool should_use_dmub_lock(struct dc_link *link)
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link)
{
/* ASIC doesn't support DMUB */
- if (!link->ctx->dmub_srv)
+ if (!dc->ctx->dmub_srv)
return false;
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
+ if (link) {
- if (link->replay_settings.replay_feature_enabled)
- return true;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ return true;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return true;
- /* only use HW lock for PSR1 on single eDP */
- if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
- struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
- dc_get_edp_links(link->dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
- if (edp_num == 1)
- return true;
+ if (edp_num == 1)
+ return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 5a72b168fb4a..9f53d2ea5fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -37,6 +37,14 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
-bool should_use_dmub_lock(struct dc_link *link);
+/**
+ * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox1 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
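A hypothetical call-site migration for the renamed helper; the wrapper function is an assumption, not part of this hunk.

static bool example_needs_dmub_lock(struct dc_link *link)
{
	/* before this patch: return should_use_dmub_lock(link); */
	return should_use_dmub_inbox1_lock(link->dc, link);
}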
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
deleted file mode 100644
index 4c21ce42054c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ /dev/null
@@ -1,141 +0,0 @@
-# SPDX-License-Identifier: MIT */
-#
-# Copyright 2023 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dml2.
-
-dml2_ccflags := $(CC_FLAGS_FPU)
-dml2_rcflags := $(CC_FLAGS_NO_FPU)
-
-ifneq ($(CONFIG_FRAME_WARN),0)
- ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
- frame_warn_limit := 4096
- else
- frame_warn_limit := 3072
- endif
- else
- frame_warn_limit := 2048
- endif
-
- ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
- frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
- endif
-endif
-
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
-
-DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
- dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
- dml_display_rq_dlg_calc.o
-
-AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-
-DML21 := src/dml2_top/dml2_top_interfaces.o
-DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/dml2_core/dml2_core_dcn4.o
-DML21 += src/dml2_core/dml2_core_utils.o
-DML21 += src/dml2_core/dml2_core_factory.o
-DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
-DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
-DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
-DML21 += src/dml2_mcg/dml2_mcg_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
-DML21 += src/dml2_standalone_libraries/lib_float_math.o
-DML21 += dml21_translation_helper.o
-DML21 += dml21_wrapper.o
-DML21 += dml21_utils.o
-
-AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
new file mode 100644
index 000000000000..97e068b6bf6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT */
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+# Makefile for dml2.
+
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2056
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+
+DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
+ dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
+ dml_display_rq_dlg_calc.o
+
+AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
index e450445bc05d..b954c9648fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
@@ -53,17 +53,17 @@ typedef const void *const_pvoid;
typedef const char *const_pchar;
typedef struct rgba_struct {
- uint8 a;
- uint8 r;
- uint8 g;
- uint8 b;
+ uint8 a;
+ uint8 r;
+ uint8 g;
+ uint8 b;
} rgba_t;
typedef struct {
- uint8 blue;
- uint8 green;
- uint8 red;
- uint8 alpha;
+ uint8 blue;
+ uint8 green;
+ uint8 red;
+ uint8 alpha;
} gen_color_t;
typedef union {
@@ -87,7 +87,7 @@ typedef union {
} uintfloat64;
#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x = x
+#define UNREFERENCED_PARAMETER(x) (x = x)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 4b9b2e84d381..c468f492b876 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
}
+
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
{ \
dml_uint_t plane_idx; \
@@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F
dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow);
dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte);
dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte);
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
index a38ed89c47a9..a38ed89c47a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
index dbeb08466092..3b1d92e7697f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
@@ -274,7 +274,6 @@ enum dml_clk_cfg_policy {
dml_use_state_freq = 2
};
-
struct soc_state_bounding_box_st {
dml_float_t socclk_mhz;
dml_float_t dscclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
index 14d389525296..e574c81edf5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
@@ -52,7 +52,7 @@
#define __DML_VBA_DEBUG__
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
-#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
+#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
index 89890c88fd66..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
index 113b0265e1d1..a82b49cf7fb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
@@ -30,7 +30,6 @@
#include "display_mode_core_structs.h"
#include "cmntypes.h"
-
#include "dml_assert.h"
#include "dml_logging.h"
@@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index bf5e7f4e0416..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 9880d3e0398e..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index ee721606b883..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
index 4bff52eaaef8..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 08f7f03b1023..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -224,9 +224,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
/* Populate stream, plane mappings and other fields in display config. */
- DC_FP_START();
result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- DC_FP_END();
if (!result)
return false;
@@ -281,9 +279,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
mode_support->dml2_instance = dml_init->dml2_instance;
- DC_FP_START();
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- DC_FP_END();
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
index 15f92029d2e5..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 793e1c038efd..16a4f97bca4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
index 281d7ad230d8..281d7ad230d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
index a64ec4dcf11a..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
index 91955bbe24b8..8e5a30287220 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
@@ -46,7 +46,6 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
- uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index e8dc6471c0be..13749c9fcf18 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -49,6 +49,11 @@ enum dml2_source_format_class {
dml2_422_packed_12 = 18
};
+enum dml2_sample_positioning {
+ dml2_interstitial = 0,
+ dml2_cosited = 1
+};
+
enum dml2_rotation_angle {
dml2_rotation_0 = 0,
dml2_rotation_90 = 1,
@@ -222,7 +227,11 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool easf_enabled;
+ bool isharp_enabled;
bool upsp_enabled;
+ enum dml2_sample_positioning upsp_sample_positioning;
+ unsigned int upsp_vtaps;
struct {
double h_ratio;
double v_ratio;
@@ -385,6 +394,7 @@ struct dml2_plane_parameters {
// The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override.
long reserved_vblank_time_ns;
unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
+ unsigned int vactive_latency_to_hide_for_pstate_admissibility_us;
unsigned int gpuvm_min_page_size_kbytes;
unsigned int hostvm_min_page_size_kbytes;
@@ -456,6 +466,7 @@ struct dml2_display_cfg {
bool enable;
bool value;
} force_nom_det_size_kbytes;
+
bool mode_support_check_disable;
bool mcache_admissibility_check_disable;
bool surface_viewport_size_check_disable;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
index 8f624a912e78..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
index 176f55947664..4a9a0d5a09b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
@@ -145,6 +145,8 @@ struct dml2_soc_bb {
struct dml2_soc_vmin_clock_limits vmin_limit;
double lower_bound_bandwidth_dchub;
+ double fraction_of_urgent_bandwidth_nominal_target;
+ double fraction_of_urgent_bandwidth_flip_target;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
@@ -170,6 +172,7 @@ struct dml2_soc_bb {
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
+ unsigned int TDLUT_33cube_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
@@ -188,7 +191,9 @@ struct dml2_ip_capabilities {
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
-
+ unsigned int ppt_max_allow_delay_ns;
+ unsigned int temp_read_max_allow_delay_us;
+ unsigned int dummy_pstate_max_allow_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 41adb1104d0f..8646ce5f1c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -70,6 +70,8 @@ struct dml2_pmo_options {
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
+ struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES];
+ unsigned int num_override_strategies_per_list[DML2_MAX_PLANES];
};
struct dml2_options {
@@ -193,6 +195,14 @@ struct dml2_mcache_surface_allocation {
} informative;
};
+enum dml2_pstate_type {
+ dml2_pstate_type_uclk,
+ dml2_pstate_type_ppt,
+ dml2_pstate_type_temp_read,
+ dml2_pstate_type_dummy_pstate,
+ dml2_pstate_type_count
+};
+
enum dml2_pstate_method {
dml2_pstate_method_na = 0,
/* hw exclusive modes */
@@ -310,6 +320,7 @@ struct dml2_mode_support_info {
bool NumberOfOTGSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
+ bool NumberOfTDLUT33cubeSupport;
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
@@ -357,6 +368,8 @@ struct dml2_mode_support_info {
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
bool temp_read_or_ppt_support;
+ bool qos_bandwidth_support;
+ bool dcfclk_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index 6ee37386f672..eba948e187c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.writeback_interface_buffer_size_kbytes = 90,
//Number of pipes after DCN Pipe harvesting
.max_num_dpp = 4,
+ .max_num_opp = 4,
.max_num_otg = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index bf62d42b3f78..f809c4073b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1303,6 +1303,7 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 16;
} else {
+
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
@@ -1320,6 +1321,7 @@ static double TruncToValidBPP(
MaxDSCBPP = 16;
}
}
+
if (Output == dml2_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
} else if (DSCEnable && Output == dml2_dp) {
@@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
bool UseDSC,
unsigned int NumberOfDSCSlices,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double DISPCLKRequired,
unsigned int NumberOfDPPRequired,
unsigned int MaxHActiveForDSC,
@@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
if (DISPCLKRequired > MaxDispclk)
return false;
- if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
+ if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
return false;
if (are_odm_segments_symmetrical) {
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
@@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode(
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double PixelClock,
unsigned int NumberOfDSCSlices,
@@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode(
UseDSC,
NumberOfDSCSlices,
TotalNumberOfActiveDPP,
+ TotalNumberOfActiveOPP,
MaxNumDPP,
+ MaxNumOPP,
DISPCLKRequired,
NumberOfDPPRequired,
MaxHActiveForDSC,
@@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
mode_lib->ms.support.TotalAvailablePipesSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
@@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
false, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
true, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8516,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
+ mode_lib->ms.NoOfOPP[k] = 1;
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 4;
+ mode_lib->ms.NoOfOPP[k] = 4;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 3;
+ mode_lib->ms.NoOfOPP[k] = 3;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 2;
+ mode_lib->ms.NoOfOPP[k] = 2;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
@@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
}
}
#if defined(__DML_VBA_DEBUG__)
@@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
+ mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
+ mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
+ }
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
mode_lib->ms.support.TotalAvailablePipesSupport = false;
+ if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
+ mode_lib->ms.support.TotalAvailablePipesSupport = false;
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
@@ -12756,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
- const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index];
struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
@@ -12771,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* from display configuration */
base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
base_programming->otg_vline_time_ns =
- (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0);
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
- stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines);
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal;
/* from core */
base_programming->config.bits.min_ttu_vblank_usable = true;
@@ -12807,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vactive */
base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_vblank:
@@ -12819,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vblank */
base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_fw_drr:
/* drr */
base_programming->type = FAMS2_STREAM_TYPE_DRR;
sub_programming->drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines;
sub_programming->drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
sub_programming->drr.only_stretch_if_required = true;
@@ -12847,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
(uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
sub_programming->subvp.vratio_denominator = 1000;
sub_programming->subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines;
sub_programming->subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
sub_programming->subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal;
sub_programming->subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive;
sub_programming->subvp.config.bits.is_multi_planar =
plane_descriptor->surface.plane1.height > 0;
sub_programming->subvp.config.bits.is_yuv420 =
@@ -12862,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
plane_descriptor->pixel_format == dml2_420_12;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_reserved_hw:
@@ -13027,7 +13050,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
+ out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport;
out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
+ out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support;
+ out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support;
for (k = 0; k < out->display_config.num_planes; k++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index 27ef0e096b25..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index 640087e862f8..cc4f0663c6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -15,6 +15,8 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
memset(out, 0, sizeof(struct dml2_core_instance));
+ out->project_id = project_id;
+
switch (project_id) {
case dml2_project_dcn4x_stage1:
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
index 411c514fe65c..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
index ffb8c09f37a5..051c31ec2f0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -36,7 +36,9 @@ struct dml2_core_ip_params {
unsigned int max_line_buffer_lines;
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int max_num_dpp;
+ unsigned int max_num_opp;
unsigned int max_num_otg;
+ unsigned int TDLUT_33cube_count;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
@@ -46,6 +48,7 @@ struct dml2_core_ip_params {
double max_vscl_ratio;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
+ unsigned int odm_combine_support_mask;
unsigned int num_dsc;
unsigned int maximum_dsc_bits_per_component;
unsigned int maximum_pixels_per_line_per_dsc_unit;
@@ -82,7 +85,6 @@ struct dml2_core_ip_params {
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
-
// MRQ
bool dcn_mrq_present;
unsigned int zero_size_buffer_entries;
@@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe {
unsigned int DPPPerSurface;
bool ScalerEnabled;
bool UPSPEnabled;
+ unsigned int UPSPVTaps;
+ enum dml2_sample_positioning UPSPSamplePositioning;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -230,6 +234,7 @@ struct dml2_core_internal_mode_support_info {
bool MSOOrODMSplitWithNonDPLink;
bool NotEnoughLanesForMSO;
bool NumberOfOTGSupport;
+ bool NumberOfTDLUT33cubeSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
bool WritebackScaleRatioAndTapsSupport;
@@ -566,6 +571,7 @@ struct dml2_core_internal_mode_support {
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
unsigned int NoOfDPP[DML2_MAX_PLANES];
+ unsigned int NoOfOPP[DML2_MAX_PLANES];
bool MPCCombine[DML2_MAX_PLANES];
double dcfclk_deepsleep;
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
@@ -576,6 +582,7 @@ struct dml2_core_internal_mode_support {
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
unsigned int TotalNumberOfActiveDPP;
+ unsigned int TotalNumberOfActiveOPP;
unsigned int TotalNumberOfSingleDPPSurfaces;
unsigned int TotalNumberOfDCCActiveDPP;
unsigned int Total3dlutActive;
@@ -1306,7 +1313,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
- enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method *pstate_switch_modes;
// Output
bool *PTEBufferSizeNotExceeded;
@@ -2308,6 +2315,7 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
+ enum dml2_project_id project_id;
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2320,6 +2328,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
+ enum dml2_project_id project_id;
struct dml2_display_cfg_programming *programming;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index 5f301befed16..5f301befed16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 22969a533a7b..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index dfd01440737d..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 20ba2e446f1d..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index a265f254152c..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 02da6f45cbf7..f54fde8fba90 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
index ad307deca3b0..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index 1b9579a32ff2..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index f00bd9e72a86..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index d88b3e0082dd..5769c2638f9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
+ unsigned int base_list_size = 0;
+ const struct dml2_pmo_pstate_strategy *base_list = NULL;
+ unsigned int *expanded_list_size = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_list = NULL;
+
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
@@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
pmo->options = in_out->options;
/* generate permutations of p-state configs from base strategy list */
- for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
- switch (i) {
+ for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
+ switch (i+1) {
case 1:
- DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_1_display,
- base_strategy_list_1_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_1_display;
+ base_list_size = base_strategy_list_1_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
+
break;
case 2:
- DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_2_display,
- base_strategy_list_2_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_2_display;
+ base_list_size = base_strategy_list_2_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
+
break;
case 3:
- DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_3_display,
- base_strategy_list_3_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_3_display;
+ base_list_size = base_strategy_list_3_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
+
break;
case 4:
- DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_4_display,
- base_strategy_list_4_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_4_display;
+ base_list_size = base_strategy_list_4_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
+
break;
}
+
+ DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+
+ /* populate list */
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_list,
+ base_list_size,
+ i + 1,
+ expanded_list,
+ expanded_list_size);
}
return true;
@@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
return synchronizable;
}
-static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
+static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
{
- return fams2_meta->contention_delay_otg_vlines +
- fams2_meta->method_subvp.programming_delay_otg_vlines +
- fams2_meta->method_subvp.phantom_vtotal +
- fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- fams2_meta->dram_clk_change_blackout_otg_vlines;
+ return pstate_meta->contention_delay_otg_vlines +
+ pstate_meta->method_subvp.programming_delay_otg_vlines +
+ pstate_meta->method_subvp.phantom_vtotal +
+ pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ pstate_meta->blackout_otg_vlines;
}
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
@@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
- if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
+ if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
return false;
}
/* check rr is within bounds */
- if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
+ if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
return false;
}
/* check required stretch is allowed */
if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
- stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
+ stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
return false;
}
}
@@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
{
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_plane_parameters *plane_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
unsigned int microschedule_vlines;
unsigned int i;
unsigned int mcaches_per_plane;
@@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (stream_descriptor->overrides.disable_subvp) {
return false;
}
- microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
+ microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);
/* block if using an interlaced timing */
if (stream_descriptor->timing.interlaced) {
@@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
* 2) refresh rate must be within the allowed bounds
*/
if (microschedule_vlines >= stream_descriptor->timing.v_active ||
- (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
+ (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
return false;
}
}
@@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta
}
static void build_method_scheduling_params(
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
- struct dml2_fams2_meta *stream_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
+ struct dml2_pstate_meta *stream_pstate_meta)
{
- stream_method_fams2_meta->allow_time_us =
- (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
- stream_fams2_meta->otg_vline_time_us;
- if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
+ stream_method_pstate_meta->allow_time_us =
+ (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
+ stream_pstate_meta->otg_vline_time_us;
+ if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
/* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/
- stream_method_fams2_meta->disallow_time_us = 0.0;
+ stream_method_pstate_meta->disallow_time_us = 0.0;
} else {
- stream_method_fams2_meta->disallow_time_us =
- stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
+ stream_method_pstate_meta->disallow_time_us =
+ stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
}
}
-static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
+static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
switch (stream_pstate_method) {
case dml2_pstate_method_vactive:
case dml2_pstate_method_fw_vactive_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
break;
case dml2_pstate_method_vblank:
case dml2_pstate_method_fw_vblank_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
break;
case dml2_pstate_method_fw_svp:
case dml2_pstate_method_fw_svp_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
break;
case dml2_pstate_method_fw_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
break;
case dml2_pstate_method_reserved_hw:
case dml2_pstate_method_reserved_fw:
@@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
case dml2_pstate_method_count:
case dml2_pstate_method_na:
default:
- stream_method_fams2_meta = NULL;
+ stream_method_pstate_meta = NULL;
}
- return stream_method_fams2_meta;
+ return stream_method_pstate_meta;
}
static bool is_timing_group_schedulable(
@@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable(
const struct display_configuation_with_meta *display_cfg,
const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
- struct dml2_fams2_per_method_common_meta *group_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
unsigned int i;
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;
unsigned int base_stream_idx = 0;
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
+ if (!stream_method_pstate_meta)
return false;
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
- group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
+ group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
+ if (!stream_method_pstate_meta)
continue;
- if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
}
- if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
/* set group allow end to smaller otg vline */
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
}
/* check waveform still has positive width */
- if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
/* timing group is not schedulable */
return false;
}
@@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable(
}
/* calculate the rest of the meta */
- build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
+ build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);
- return group_fams2_meta->allow_time_us > 0.0 &&
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
+ return group_pstate_meta->allow_time_us > 0.0 &&
+ group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
static bool is_config_schedulable(
@@ -1354,7 +1377,7 @@ static bool is_config_schedulable(
double max_allow_delay_us = 0.0;
- memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
+ memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
/* search for a general solution to the schedule */
@@ -1369,12 +1392,12 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
}
- max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
+ max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
}
if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
@@ -1391,8 +1414,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
- double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
+ double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
+ double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
@@ -1410,19 +1433,19 @@ static bool is_config_schedulable(
* other display, or when >2 streams continue to halve the remaining allow time.
*/
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
- if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
+ if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
/* this timing group always allows */
continue;
}
- double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
+ double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
/* stream can't overlap itself */
- if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
+ if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
max_allow_time_us = math_min2(
- s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
- (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
+ s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
+ (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);
if (max_allow_time_us < 0.0) {
/* failed exit early */
@@ -1450,8 +1473,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
- double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
+ double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
+ double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
@@ -1470,7 +1493,7 @@ static bool is_config_schedulable(
unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
- if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
+ if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
@@ -1492,18 +1515,18 @@ static bool is_config_schedulable(
/* default period_0 > period_1 */
unsigned int lrg_idx = 0;
unsigned int sml_idx = 1;
- if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
+ if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
/* period_0 < period_1 */
lrg_idx = 1;
sml_idx = 0;
}
- period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
- shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
- max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
- max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
+ period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
+ shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
+ max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
+ max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;
if (shift_per_period > 0.0 &&
- shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
+ shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
schedulable = true;
}
@@ -1661,7 +1684,7 @@ static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_c
return max_vactive_fill_us;
}
-static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
+static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
@@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
@@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(display_config->display_config.num_streams - 1);
/* common */
- stream_fams2_meta->valid = true;
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
- stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
- stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->nom_vtotal * timing->h_total);
- stream_fams2_meta->nom_frame_time_us =
- (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
- stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
+ stream_pstate_meta->valid = true;
+ stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
+ stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
+ stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->nom_vtotal * timing->h_total);
+ stream_pstate_meta->nom_frame_time_us =
+ (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
+ stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
} else {
/* assume min of 48Hz */
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
}
} else {
- stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
- }
- stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->max_vtotal * timing->h_total);
- stream_fams2_meta->max_frame_time_us =
- (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
-
- stream_fams2_meta->scheduling_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->contention_delay_otg_vlines =
- (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
+ }
+ stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->max_vtotal * timing->h_total);
+ stream_pstate_meta->max_frame_time_us =
+ (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;
+
+ stream_pstate_meta->scheduling_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->contention_delay_otg_vlines =
+ (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
- stream_fams2_meta->allow_to_target_delay_otg_vlines =
- (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
- stream_fams2_meta->min_allow_width_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->allow_to_target_delay_otg_vlines =
+ (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
+ stream_pstate_meta->min_allow_width_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
/* this value should account for urgent latency */
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
+ stream_pstate_meta->blackout_otg_vlines =
(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
- stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->otg_vline_time_us);
/* scheduling params should be built based on the worst case for allow_time:disallow_time */
/* vactive */
if (display_config->display_config.num_streams == 1) {
/* for single stream, guarantee at least an instant of allow */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
- (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
+ (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
}
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;
- if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
- timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
+ timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
} else {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
}
- stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
+ stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
- stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
- stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
+ stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
+ stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);
/* subvp */
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.phantom_vactive =
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.phantom_vactive =
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
stream_info->phantom_min_v_active;
- stream_fams2_meta->method_subvp.phantom_vfp =
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.phantom_vfp =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/
- stream_fams2_meta->method_subvp.phantom_vtotal =
+ stream_pstate_meta->method_subvp.phantom_vtotal =
stream_info->phantom_v_startup +
- stream_fams2_meta->method_subvp.phantom_vfp +
+ stream_pstate_meta->method_subvp.phantom_vfp +
1 +
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vactive;
- stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
stream_descriptor->timing.v_blank_end +
- stream_fams2_meta->contention_delay_otg_vlines +
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vtotal +
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
+ stream_pstate_meta->contention_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vtotal +
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
+ stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);
/* drr */
- stream_fams2_meta->method_drr.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->vblank_start +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_drr.common.allow_start_otg_vline =
+ stream_pstate_meta->vblank_start +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal * 2 +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- }
- stream_fams2_meta->method_drr.common.allow_end_otg_vline =
- stream_fams2_meta->method_drr.stretched_vtotal -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal * 2 +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
+ }
+ stream_pstate_meta->method_drr.common.allow_end_otg_vline =
+ stream_pstate_meta->method_drr.stretched_vtotal -
+ stream_pstate_meta->blackout_otg_vlines;
+ build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
@@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
int stream_index)
{
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
stream_svp_meta->valid = true;
/* PMO FAMS2 precalculates these values */
- stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
- stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
- stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
+ stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
+ stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
}
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
@@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
- build_fams2_meta_per_stream(pmo, display_config, stream_index);
+ build_pstate_meta_per_stream(pmo, display_config, stream_index);
/* SVP meta */
build_subvp_meta_per_stream(pmo, display_config, stream_index);
@@ -2077,7 +2100,7 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2098,7 +2121,7 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2144,9 +2167,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
/* copy FAMS2 meta */
if (success) {
display_config->stage3.fams2_required = fams2_required;
- memcpy(&display_config->stage3.stream_fams2_meta,
- &scratch->pmo_dcn4.stream_fams2_meta,
- sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
+ memcpy(&display_config->stage3.stream_pstate_meta,
+ &scratch->pmo_dcn4.stream_pstate_meta,
+ sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
}
return success;
@@ -2188,12 +2211,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
return false;
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
- struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
- get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
+ get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
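Illustrative aside, not part of the patch above: the per-stream arithmetic that build_pstate_meta_per_stream() performs can be sketched in plain C. The struct and function names below (stream_timing_example, compute_pstate_timing_example) are made up for the sketch; only the formulas mirror the lines shown in the hunk (OTG line time from h_total and the pixel clock, nominal frame time from vtotal, and max vtotal stretched to the minimum DRR refresh rate).

#include <math.h>
#include <stdio.h>

struct stream_timing_example {
	unsigned int h_total;          /* total pixels per line */
	unsigned int v_active;         /* active lines */
	unsigned int vblank_nom;       /* nominal vblank lines */
	unsigned int pixel_clock_khz;  /* pixel clock in kHz */
	unsigned int min_refresh_uhz;  /* minimum DRR refresh in uHz, 0 if DRR disabled */
};

static void compute_pstate_timing_example(const struct stream_timing_example *t)
{
	/* one OTG line in microseconds: h_total pixels / (pixel_clock_khz * 1000 pixels per second) */
	double otg_vline_time_us = (double)t->h_total / t->pixel_clock_khz * 1000.0;
	unsigned int nom_vtotal = t->vblank_nom + t->v_active;
	double nom_frame_time_us = (double)nom_vtotal * otg_vline_time_us;
	unsigned int max_vtotal = nom_vtotal;

	/* with DRR enabled, the frame may stretch until it reaches the minimum refresh rate */
	if (t->min_refresh_uhz)
		max_vtotal = (unsigned int)floor((double)t->pixel_clock_khz /
				((double)t->min_refresh_uhz * t->h_total) * 1e9);

	printf("vline %.2f us, nom frame %.1f us, max vtotal %u\n",
	       otg_vline_time_us, nom_frame_time_us, max_vtotal);
}

int main(void)
{
	/* 1080p60 CEA timing: 2200x1125 total at 148.5 MHz, with a 48 Hz DRR floor */
	struct stream_timing_example t = { 2200, 1080, 45, 148500, 48000000 };

	compute_pstate_timing_example(&t);  /* ~14.81 us/line, ~16667 us/frame, max vtotal 1406 */
	return 0;
}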
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 6baab7ad6ecc..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 55d2464365d0..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..5e14d85821e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index 4a7c4c62111e..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 53bd8602f9ef..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
index 611c80f4f1bf..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d52aa82283b3..9f562f0c4797 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -255,7 +255,7 @@ struct dml2_implicit_svp_meta {
unsigned long v_front_porch;
};
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
/* generic params */
unsigned int allow_start_otg_vline;
unsigned int allow_end_otg_vline;
@@ -265,7 +265,7 @@ struct dml2_fams2_per_method_common_meta {
double period_us;
};
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
bool valid;
double otg_vline_time_us;
unsigned int scheduling_delay_otg_vlines;
@@ -280,14 +280,14 @@ struct dml2_fams2_meta {
unsigned int max_vtotal;
double min_refresh_rate_hz;
double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ unsigned int blackout_otg_vlines;
struct {
double max_vactive_det_fill_delay_us;
unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vactive;
struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vblank;
struct {
unsigned int programming_delay_otg_vlines;
@@ -296,15 +296,24 @@ struct dml2_fams2_meta {
unsigned long phantom_vactive;
unsigned long phantom_vfp;
unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_subvp;
struct {
unsigned int programming_delay_otg_vlines;
unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_drr;
};
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
struct dml2_optimization_stage3_state {
bool performed;
bool success;
@@ -319,7 +328,7 @@ struct dml2_optimization_stage3_state {
// Meta-data for FAMS2
bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
int min_clk_index_for_latency;
};
@@ -472,6 +481,7 @@ struct dml2_core_scratch {
};
struct dml2_core_instance {
+ enum dml2_project_id project_id;
struct dml2_mcg_min_clock_table *minimum_clock_table;
struct dml2_core_internal_state_inputs inputs;
struct dml2_core_internal_state_intermediates intermediates;
@@ -619,6 +629,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
struct dml2_pmo_scratch {
union {
struct {
@@ -648,7 +664,7 @@ struct dml2_pmo_scratch {
// Stores all the implicit SVP meta information indexed by stream index of the display
// configuration under inspection, built at optimization stage init
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
unsigned int num_stutter_candidates;
@@ -663,7 +679,7 @@ struct dml2_pmo_scratch {
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
double group_phase_offset[DML2_MAX_PLANES];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 4cfe64aa8492..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
index 1538b708d8be..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
index 7ca7f2a743c2..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
index 140ec01545db..55b3e3ca54f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
@@ -23,7 +23,7 @@
* Authors: AMD
*
*/
-
+
#ifndef __DML2_INTERNAL_TYPES_H__
#define __DML2_INTERNAL_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index c59f825cfae9..66040c877d68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -24,6 +24,7 @@
*
*/
+
#include "dml2_dc_types.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
index 9d64851f54e7..9d64851f54e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
index ef693f608d59..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
index e83e05248592..e83e05248592 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index 3b866e876bf4..d834cb595afa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
+
case dml_project_dcn401:
out->pct_ideal_fabric_bw_after_urgent = 76; //67;
out->max_avg_sdp_bw_use_normal_percent = 75; //80;
@@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+
+
case dml_project_dcn401:
p->in_states->num_states = 2;
transactions_per_mem_clock = 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
index d764773938f4..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
index 04fcfe637119..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 9deb03a18ccc..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
index c384e141cebc..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
index 17f0972b1af7..17f0972b1af7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
index f7d30b47beff..d459f93cf40b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
@@ -31,3 +31,4 @@
*/
#include "os_types.h"
#include "cmntypes.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
index 00d22e542469..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
index bf491cf0582d..bf491cf0582d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
index 2a2f84e07ca8..7fadbe6d7af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+
#ifndef __DML_LOGGING_H__
#define __DML_LOGGING_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 01480a04f85e..ce91e5d28956 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base)
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+
+ dpp_base->cursor_offload = false;
}
@@ -484,10 +486,12 @@ void dpp1_set_cursor_position(
cur_en = 0; /* not visible beyond top edge*/
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
@@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias;
+ dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index f466182963f7..b12f34345a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1348,7 +1348,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF; \
- uint32_t CURSOR0_FP_SCALE_BIAS;
+ uint32_t CURSOR0_FP_SCALE_BIAS; \
+ uint32_t OBUF_CONTROL;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1450,7 +1451,6 @@ void dpp1_set_degamma(
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params);
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 09be2a90cc79..ef4a16117181 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
}
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ dpp_reg_state->recout_start = REG_READ(RECOUT_START);
+ dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE);
+ dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_mode = REG_READ(SCL_MODE);
+ dpp_reg_state->cm_control = REG_READ(CM_CONTROL);
+ dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL);
+ dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL);
+ dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL);
+ dpp_reg_state->mpc_size = REG_READ(MPC_SIZE);
+}
+
/* program post scaler CSC block in dpp CM */
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -578,9 +598,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index f236824126e9..d4a70b4379ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha(
void dpp30_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state);
+
bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index fa67e54bf94e..8a5aa5e86850 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_read_reg_state = dpp30_read_reg_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index f7a373a3d70a..977d83bf7741 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv(
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 36187f890d5d..96c2c853de42 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.set_cursor_matrix = dpp401_set_cursor_matrix,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 7aab77b58869..62bf7cea21d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -132,10 +136,12 @@ void dpp401_set_cursor_position(
uint32_t cur_en = pos->enable ? 1 : 0;
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp401_set_optional_cursor_attributes(
@@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias;
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 89f0d999bf35..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL);
+ dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS);
+}
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
@@ -407,7 +415,7 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// Need to find the ceiling value for the slice width
- dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
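A brief illustrative note on the slice_width change above: the padded picture width is split across num_slices_h with the usual integer ceiling-division idiom. The numbers and the div_round_up helper below are made up for the example; only the (a + b - 1) / b pattern matches the patched line.

#include <stdio.h>

/* integer ceiling division: smallest q such that q * b >= a */
static unsigned int div_round_up(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	unsigned int pic_width = 3840, dsc_padding = 8, num_slices_h = 4;

	/* each slice must cover the padded width: ceil(3848 / 4) = 962 */
	printf("slice_width = %u\n", div_round_up(pic_width + dsc_padding, num_slices_h));
	return 0;
}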
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index a9c04fc95bd1..2337c3a97235 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
uint8_t *dsc_packed_pps);
void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 6f4f5a3c4861..f9c6377ac66c 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -32,6 +32,7 @@ static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
static const struct dsc_funcs dcn35_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 7bd92ae8b13e..c1bdbb38c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
.dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
+ .dsc_read_reg_state = dsc2_read_reg_state
};
/* Macro definitions for REG_SET macros */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index b0bd1f9425b5..81c83d5fe042 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -41,6 +41,7 @@ struct dsc_config {
enum dc_color_depth color_depth; /* Bits per component */
bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
+ uint32_t dsc_padding;
};
@@ -65,6 +66,10 @@ struct dcn_dsc_state {
uint32_t dsc_opp_source;
};
+struct dcn_dsc_reg_state {
+ uint32_t dsc_top_control;
+ uint32_t dscc_interrupt_control_status;
+};
/* DSC encoder capabilities
* They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
@@ -99,6 +104,7 @@ struct dsc_enc_caps {
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+ void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index e7e5f6d4778e..181a93dc46e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,33 +440,15 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
-void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes)
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0],
- DET0_SIZE, &target_det_sizes[0]);
-
- REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1],
- DET1_SIZE, &target_det_sizes[1]);
-
- REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2],
- DET2_SIZE, &target_det_sizes[2]);
-
- REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3],
- DET3_SIZE, &target_det_sizes[3]);
-
-}
-
-uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub)
-{
- struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t compbuf_config_error = 0;
-
- REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR,
- &compbuf_config_error);
-
- return compbuf_config_error;
+ hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL);
+ hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL);
+ hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL);
+ hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL);
+ hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL);
}
static const struct hubbub_funcs hubbub30_funcs = {
@@ -486,8 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index 49a469969d36..9e14de3ccaee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,10 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
-void hubbub3_get_det_sizes(struct hubbub *hubbub,
- uint32_t *curr_det_sizes,
- uint32_t *target_det_sizes);
-
-uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub);
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index cdb20251a154..d1aaa58b7db3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -1071,8 +1071,7 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 4d4ca6d77bbd..237331b35378 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -1037,8 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index a443722a8632..1b7746a6549a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -589,8 +589,7 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index a36273a52880..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,8 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 9b026600b90e..6378e3fd7249 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp)
{
memset(&hubp->pos, 0, sizeof(hubp->pos));
memset(&hubp->att, 0, sizeof(hubp->att));
+ hubp->cursor_offload = false;
}
void hubp1_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index cf2eb9793008..f2571076fc50 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -105,7 +105,9 @@
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
SRI(HUBP_CLK_CNTL, HUBP, id),\
- SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -251,7 +253,19 @@
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
uint32_t HUBP_CLK_CNTL; \
- uint32_t HUBPRET_READ_LINE_VALUE
+ uint32_t HUBPRET_READ_LINE_VALUE; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \
+ uint32_t HUBPRET_INTERRUPT; \
+ uint32_t HUBPRET_MEM_PWR_CTRL; \
+ uint32_t HUBPRET_MEM_PWR_STATUS; \
+ uint32_t HUBPRET_READ_LINE_CTRL0; \
+ uint32_t HUBPRET_READ_LINE_CTRL1; \
+ uint32_t HUBPRET_READ_LINE0; \
+ uint32_t HUBPRET_READ_LINE1; \
+ uint32_t HUBPREQ_MEM_PWR_CTRL; \
+ uint32_t HUBPREQ_MEM_PWR_STATUS
+
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -688,6 +702,123 @@ struct dcn_fl_regs_st {
uint32_t lut_fl_mode;
uint32_t lut_fl_format;
};
+struct dcn_hubp_reg_state {
+ uint32_t hubp_cntl;
+ uint32_t mall_config;
+ uint32_t mall_sub_vp;
+ uint32_t hubp_req_size_config;
+ uint32_t hubp_req_size_config_c;
+ uint32_t vmpg_config;
+ uint32_t addr_config;
+ uint32_t pri_viewport_dimension;
+ uint32_t pri_viewport_dimension_c;
+ uint32_t pri_viewport_start;
+ uint32_t pri_viewport_start_c;
+ uint32_t sec_viewport_dimension;
+ uint32_t sec_viewport_dimension_c;
+ uint32_t sec_viewport_start;
+ uint32_t sec_viewport_start_c;
+ uint32_t surface_config;
+ uint32_t tiling_config;
+ uint32_t clk_cntl;
+ uint32_t mall_status;
+ uint32_t measure_win_ctrl_dcfclk;
+ uint32_t measure_win_ctrl_dppclk;
+
+ uint32_t blank_offset_0;
+ uint32_t blank_offset_1;
+ uint32_t cursor_settings;
+ uint32_t dcn_cur0_ttu_cntl0;
+ uint32_t dcn_cur0_ttu_cntl1;
+ uint32_t dcn_cur1_ttu_cntl0;
+ uint32_t dcn_cur1_ttu_cntl1;
+ uint32_t dcn_dmdat_vm_cntl;
+ uint32_t dcn_expansion_mode;
+ uint32_t dcn_global_ttu_cntl;
+ uint32_t dcn_surf0_ttu_cntl0;
+ uint32_t dcn_surf0_ttu_cntl1;
+ uint32_t dcn_surf1_ttu_cntl0;
+ uint32_t dcn_surf1_ttu_cntl1;
+ uint32_t dcn_ttu_qos_wm;
+ uint32_t dcn_vm_mx_l1_tlb_cntl;
+ uint32_t dcn_vm_system_aperture_high_addr;
+ uint32_t dcn_vm_system_aperture_low_addr;
+ uint32_t dcsurf_flip_control;
+ uint32_t dcsurf_flip_control2;
+ uint32_t dcsurf_primary_meta_surface_address;
+ uint32_t dcsurf_primary_meta_surface_address_c;
+ uint32_t dcsurf_primary_meta_surface_address_high;
+ uint32_t dcsurf_primary_meta_surface_address_high_c;
+ uint32_t dcsurf_primary_surface_address;
+ uint32_t dcsurf_primary_surface_address_c;
+ uint32_t dcsurf_primary_surface_address_high;
+ uint32_t dcsurf_primary_surface_address_high_c;
+ uint32_t dcsurf_secondary_meta_surface_address;
+ uint32_t dcsurf_secondary_meta_surface_address_c;
+ uint32_t dcsurf_secondary_meta_surface_address_high;
+ uint32_t dcsurf_secondary_meta_surface_address_high_c;
+ uint32_t dcsurf_secondary_surface_address;
+ uint32_t dcsurf_secondary_surface_address_c;
+ uint32_t dcsurf_secondary_surface_address_high;
+ uint32_t dcsurf_secondary_surface_address_high_c;
+ uint32_t dcsurf_surface_control;
+ uint32_t dcsurf_surface_earliest_inuse;
+ uint32_t dcsurf_surface_earliest_inuse_c;
+ uint32_t dcsurf_surface_earliest_inuse_high;
+ uint32_t dcsurf_surface_earliest_inuse_high_c;
+ uint32_t dcsurf_surface_flip_interrupt;
+ uint32_t dcsurf_surface_inuse;
+ uint32_t dcsurf_surface_inuse_c;
+ uint32_t dcsurf_surface_inuse_high;
+ uint32_t dcsurf_surface_inuse_high_c;
+ uint32_t dcsurf_surface_pitch;
+ uint32_t dcsurf_surface_pitch_c;
+ uint32_t dst_after_scaler;
+ uint32_t dst_dimensions;
+ uint32_t dst_y_delta_drq_limit;
+ uint32_t flip_parameters_0;
+ uint32_t flip_parameters_1;
+ uint32_t flip_parameters_2;
+ uint32_t flip_parameters_3;
+ uint32_t flip_parameters_4;
+ uint32_t flip_parameters_5;
+ uint32_t flip_parameters_6;
+ uint32_t hubpreq_mem_pwr_ctrl;
+ uint32_t hubpreq_mem_pwr_status;
+ uint32_t nom_parameters_0;
+ uint32_t nom_parameters_1;
+ uint32_t nom_parameters_2;
+ uint32_t nom_parameters_3;
+ uint32_t nom_parameters_4;
+ uint32_t nom_parameters_5;
+ uint32_t nom_parameters_6;
+ uint32_t nom_parameters_7;
+ uint32_t per_line_delivery;
+ uint32_t per_line_delivery_pre;
+ uint32_t prefetch_settings;
+ uint32_t prefetch_settings_c;
+ uint32_t ref_freq_to_pix_freq;
+ uint32_t uclk_pstate_force;
+ uint32_t vblank_parameters_0;
+ uint32_t vblank_parameters_1;
+ uint32_t vblank_parameters_2;
+ uint32_t vblank_parameters_3;
+ uint32_t vblank_parameters_4;
+ uint32_t vblank_parameters_5;
+ uint32_t vblank_parameters_6;
+ uint32_t vmid_settings_0;
+
+ uint32_t hubpret_control;
+ uint32_t hubpret_interrupt;
+ uint32_t hubpret_mem_pwr_ctrl;
+ uint32_t hubpret_mem_pwr_status;
+ uint32_t hubpret_read_line_ctrl0;
+ uint32_t hubpret_read_line_ctrl1;
+ uint32_t hubpret_read_line_status;
+ uint32_t hubpret_read_line_value;
+ uint32_t hubpret_read_line0;
+ uint32_t hubpret_read_line1;
+};
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
@@ -718,7 +849,6 @@ struct dcn_hubp_state {
uint32_t hubp_cntl;
uint32_t flip_control;
};
-
struct dcn10_hubp {
struct hubp base;
struct dcn_hubp_state state;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 91259b896e03..92288de4cc10 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes(
hubp->curs_attr = *attr;
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ }
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
@@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position(
cur_en = 0; /* not visible beyond top edge*/
if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0;
+
+ if (cur_en && cursor_not_programmed)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
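The gating added above (and in the dcn401 variant later in this patch) always refreshes the cached hubp->att / hubp->pos state but skips the MMIO writes while hubp->cursor_offload is set, so a firmware-driven update can program the same values from the cache instead. A minimal, self-contained sketch of that pattern — with illustrative types and a stand-in for the REG_SET / REG_UPDATE calls, not the actual DC code — might look like:

	#include <stdbool.h>
	#include <stdint.h>

	struct cursor_pos_sketch { int32_t x, y; };

	struct hubp_sketch {
		bool cursor_offload;              /* set between begin/commit of an offload update */
		struct cursor_pos_sketch cached_pos;  /* mirrors hubp->pos in the real driver */
		bool cached_enable;
	};

	/* stands in for the REG_SET / REG_UPDATE sequence above */
	static void write_cursor_regs_sketch(const struct cursor_pos_sketch *pos, bool enable)
	{
		(void)pos;
		(void)enable;
	}

	static void cursor_set_position_sketch(struct hubp_sketch *hubp,
					       const struct cursor_pos_sketch *pos, bool enable)
	{
		if (!hubp->cursor_offload)
			write_cursor_regs_sketch(pos, enable);

		/* cached unconditionally; the offload path packs this into the DMUB payload */
		hubp->cached_pos = *pos;
		hubp->cached_enable = enable;
	}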
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index f325db555102..7062e6653062 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -145,7 +145,8 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0
+ uint32_t VMID_SETTINGS_0;\
+ uint32_t DST_Y_DELTA_DRQ_LIMIT
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
@@ -176,7 +177,10 @@
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
- uint32_t DCHUBP_MCACHEID_CONFIG
+ uint32_t DCHUBP_MCACHEID_CONFIG;\
+ uint32_t DCHUBP_MALL_SUB_VP;\
+ uint32_t DCHUBP_ADDR_CONFIG;\
+ uint32_t HUBP_MALL_STATUS
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e2740482e1cf..08ea0a1b9e7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -73,8 +73,6 @@
* On any mode switch, if the new reg values are smaller than the current values,
* then update the regs with the new values.
*
- * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
- *
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 556214b2227d..0cc6f4558989 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp)
}
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG);
+ reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP);
+ reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG);
+ reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C);
+ reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG);
+ reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG);
+ reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION);
+ reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C);
+ reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START);
+ reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C);
+ reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION);
+ reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C);
+ reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START);
+ reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C);
+ reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG);
+ reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG);
+ reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL);
+ reg_state->mall_status = REG_READ(HUBP_MALL_STATUS);
+ reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK);
+ reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK);
+
+ reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0);
+ reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1);
+ reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS);
+ reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0);
+ reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1);
+ reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0);
+ reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1);
+ reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL);
+ reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE);
+ reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL);
+ reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0);
+ reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1);
+ reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0);
+ reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1);
+ reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM);
+ reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL);
+ reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR);
+ reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR);
+ reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+ reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2);
+ reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL);
+ reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE);
+ reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C);
+ reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH);
+ reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT);
+ reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE);
+ reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C);
+ reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH);
+ reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH);
+ reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C);
+ reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER);
+ reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS);
+ reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT);
+ reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0);
+ reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1);
+ reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2);
+ reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3);
+ reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4);
+ reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5);
+ reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6);
+ reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL);
+ reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS);
+ reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0);
+ reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1);
+ reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2);
+ reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3);
+ reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4);
+ reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5);
+ reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6);
+ reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7);
+ reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY);
+ reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE);
+ reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS);
+ reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C);
+ reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ);
+ reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+ reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0);
+ reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1);
+ reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2);
+ reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3);
+ reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4);
+ reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5);
+ reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6);
+ reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0);
+ reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL);
+ reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT);
+ reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL);
+ reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS);
+ reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0);
+ reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1);
+ reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS);
+ reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE);
+ reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0);
+ reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1);
+}
+
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -505,30 +625,6 @@ void hubp3_init(struct hubp *hubp)
hubp_reset(hubp);
}
-uint32_t hubp3_get_current_read_line(struct hubp *hubp)
-{
- uint32_t read_line = 0;
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_GET(HUBPRET_READ_LINE_VALUE,
- PIPE_READ_LINE,
- &read_line);
-
- return read_line;
-}
-
-unsigned int hubp3_get_underflow_status(struct hubp *hubp)
-{
- uint32_t hubp_underflow = 0;
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_GET(DCHUBP_CNTL,
- HUBP_UNDERFLOW_STATUS,
- &hubp_underflow);
-
- return hubp_underflow;
-}
-
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -558,8 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index 842f4eb72cc8..c767e9f4f9b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -296,6 +296,8 @@ void hubp3_dmdata_set_attributes(
void hubp3_read_state(struct hubp *hubp);
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
+
void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
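Callers probe the new hook through the funcs table before using it, as the dcn30_hwseq.c hunk later in this patch does; a minimal call-site fragment (the enclosing debug-data plumbing omitted):

	struct dcn_hubp_reg_state state = {0};

	if (hubp->funcs->hubp_read_reg_state)
		hubp->funcs->hubp_read_reg_state(hubp, &state);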
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 47101847c2b7..189045f85039 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -110,9 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index a5f23bb2a76a..a781085b046b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes(
uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
uint32_t cursor_height = attr->height;
uint32_t cursor_size = cursor_width * cursor_height;
-
- hubp->curs_attr = *attr;
-
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ bool use_mall_for_cursor;
switch (attr->color_format) {
case CURSOR_MODE_MONO:
@@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes(
cursor_size *= 8;
break;
}
+ use_mall_for_cursor = cursor_size > 16384 ? 1 : 0;
+
+ hubp->curs_attr = *attr;
- if (cursor_size > 16384)
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
- else
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor);
+ }
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+ hubp->use_mall_for_cursor = use_mall_for_cursor;
}
void hubp32_init(struct hubp *hubp)
{
@@ -206,9 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp32_construct(
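A quick sanity check on the MALL threshold used above: cursor_width is rounded up to a multiple of 64 and cursor_size starts as cursor_width * cursor_height, then gets a per-format byte multiplier (only the *= 8 case, presumably a 64-bit-per-pixel format, is visible in this hunk). Under that assumption, a 64x64 cursor in such a format is 64 * 64 * 8 = 32768 bytes, which is above the 16384-byte cutoff, so use_mall_for_cursor is set; a cursor whose size stays at or below 16384 bytes leaves it cleared.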
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index b140808f21af..79c583e258c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
+ .hubp_read_reg_state = hubp3_read_reg_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp35_init,
@@ -218,9 +219,6 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 0fcbc6a35be6..f01eae50d02f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -783,21 +783,23 @@ void hubp401_cursor_set_position(
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, x_pos,
- CURSOR_Y_POSITION, y_pos);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
-
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
@@ -1071,9 +1073,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
.hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index fdabbeec8ffa..4570b8016de5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 24184b4eb352..3005115c8505 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
}
+static void
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_encoder_control encoder_control = {0};
+
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+
+ bios->funcs->encoder_control(bios, &encoder_control);
+}
+
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -689,6 +703,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -1176,7 +1193,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc);
- dc->hwss.disable_audio_stream(pipe_ctx);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dc->hwss.disable_audio_stream(pipe_ctx);
link_hwss->reset_stream_encoder(pipe_ctx);
@@ -1196,6 +1214,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1581,6 +1602,51 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+ uint8_t bit_depth;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ bit_depth = 0;
+ break;
+ case COLOR_DEPTH_666:
+ bit_depth = 6;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ bit_depth = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bit_depth = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bit_depth = 16;
+ break;
+ }
+
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
+
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -1600,6 +1666,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1679,7 +1749,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1913,6 +1984,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -2005,9 +2077,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
power_down_all_hw_blocks(dc);
/* DSC could be enabled on eDP during VBIOS post.
- * To clean up dsc blocks if eDP is in link but not active.
+ * Clean up the DSC blocks only when every eDP stream has dpms_off set.
*/
- if (edp_link_with_sink && (edp_stream_num == 0))
+ for (i = 0; i < edp_stream_num; i++) {
+ if (!edp_streams[i]->dpms_off) {
+ should_clean_dsc_block = false;
+ }
+ }
+
+ if (should_clean_dsc_block)
clean_up_dsc_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index e9fe97f0c4ea..fa62e40a9858 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -2245,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -3090,6 +3090,9 @@ static void dcn10_update_dchubp_dpp(
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 9477c9f9e196..6bd905905984 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1449,7 +1449,7 @@ void dcn20_pipe_control_lock(
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -1793,6 +1793,9 @@ void dcn20_update_dchubp_dpp(
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index e47ed5571dfd..81bcadf5e57e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -53,7 +53,8 @@
#include "link_service.h"
#include "dc_state_priv.h"
-
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
#define DC_LOGGER_INIT(logger)
@@ -1235,44 +1236,47 @@ void dcn30_get_underflow_debug_data(const struct dc *dc,
{
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (tg) {
- uint32_t v_blank_start = 0, v_blank_end = 0;
-
- out_data->otg_inst = tg->inst;
-
- tg->funcs->get_scanoutpos(tg,
- &v_blank_start,
- &v_blank_end,
- &out_data->h_position,
- &out_data->v_position);
-
- out_data->otg_frame_count = tg->funcs->get_frame_count(tg);
-
- out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg);
+ if (hubbub) {
+ if (hubbub->funcs->hubbub_read_reg_state) {
+ hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state);
+ }
}
for (int i = 0; i < MAX_PIPES; i++) {
struct hubp *hubp = dc->res_pool->hubps[i];
-
- if (hubp) {
- if (hubp->funcs->hubp_get_underflow_status)
- out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp);
-
- if (hubp->funcs->hubp_in_blank)
- out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp);
-
- if (hubp->funcs->hubp_get_current_read_line)
- out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp);
-
- if (hubp->funcs->hubp_get_det_config_error)
- out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp);
- }
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct output_pixel_processor *opp = dc->res_pool->opps[i];
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct timing_generator *optc = dc->res_pool->timing_generators[i];
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ if (hubp)
+ if (hubp->funcs->hubp_read_reg_state)
+ hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]);
+
+ if (dpp)
+ if (dpp->funcs->dpp_read_reg_state)
+ dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]);
+
+ if (opp)
+ if (opp->funcs->opp_read_reg_state)
+ opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]);
+
+ if (dsc)
+ if (dsc->funcs->dsc_read_reg_state)
+ dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]);
+
+ if (mpc)
+ if (mpc->funcs->mpc_read_reg_state)
+ mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]);
+
+ if (optc)
+ if (optc->funcs->optc_read_reg_state)
+ optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]);
+
+ if (dccg)
+ if (dccg->funcs->dccg_read_reg_state)
+ dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]);
}
-
- if (hubbub->funcs->get_det_sizes)
- hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes);
-
- if (hubbub->funcs->compbuf_config_error)
- out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub);
-
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index b822f2dffff0..d1ecdb92b072 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
+ if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX)
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index f925f669f2a4..4ee6ed610de0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index f39292952702..bf19ba65d09a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 05011061822c..7aa0f452e8f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -364,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
-
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
@@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
/*make sure DPPCLK is on*/
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
hubp->power_gated = true;
@@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc)
if (dc->hwss.hw_block_power_up)
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ /*
+ * Insert a blank update to modify the write index and set pipe_mask to 0.
+ *
+ * While the DMU is interlocked with driver full pipe programming via
+ * the DMU HW lock, if the cursor update begins to execute after a full
+ * pipe programming occurs there are two possible issues:
+ *
+ * 1. Outdated cursor information is programmed, replacing the current update
+ * 2. The cursor update in firmware holds the cursor lock, preventing
+ * the current update from being latched atomically in the same frame
+ * as the rest of the update.
+ *
+ * This blank update, treated as a no-op, will allow the firmware to skip
+ * the programming.
+ */
+
+ if (dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe);
+
+ if (dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe);
+}
+
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = true;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = true;
+}
+
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = false;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = false;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
+ .data.cursor_offload_v1.offload_streams[stream_idx];
+
+ shared_stream->last_write_idx = write_idx;
+
+ cs->offload_streams[stream_idx].write_idx = write_idx;
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
+}
+
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
+}
+
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ dc_dmub_srv_program_cursor_now(dc, pipe);
+}
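The dcn35 cursor offload helpers above follow a small single-producer ring protocol: dcn35_begin_cursor_offload_update stamps write_idx_start on the slot selected by write_idx + 1, dcn35_update_cursor_offload_pipe fills that slot from the cached HUBP/DPP state and sets a bit in pipe_mask, and dcn35_commit_cursor_offload_update stamps write_idx_finish before publishing the new write_idx (and mirroring it into the DMUB shared state). A minimal, self-contained sketch of that handshake — hypothetical names, an arbitrary slot count, and sketch-level slot clearing, not the actual DC code — could look like:

	#include <stdint.h>

	#define NUM_SLOTS_SKETCH 4	/* stands in for ARRAY_SIZE(payloads); illustrative only */

	struct payload_sketch {
		uint32_t write_idx_start;	/* stamped by begin */
		uint32_t write_idx_finish;	/* stamped by commit */
		uint32_t pipe_mask;		/* which pipes were filled in this update */
	};

	struct offload_stream_sketch {
		uint32_t write_idx;		/* last published update */
		struct payload_sketch payloads[NUM_SLOTS_SKETCH];
	};

	static struct payload_sketch *begin_update_sketch(struct offload_stream_sketch *s)
	{
		uint32_t idx = s->write_idx + 1;	/* new payload (+1), as above */
		struct payload_sketch *p = &s->payloads[idx % NUM_SLOTS_SKETCH];

		/* cleared here in this sketch so a bare begin+commit yields the
		 * "blank" (pipe_mask == 0) no-op update used by the abort path */
		p->pipe_mask = 0;
		p->write_idx_start = idx;
		return p;
	}

	static void fill_pipe_sketch(struct offload_stream_sketch *s, uint32_t pipe_idx)
	{
		uint32_t idx = s->write_idx + 1;
		struct payload_sketch *p = &s->payloads[idx % NUM_SLOTS_SKETCH];

		/* the real update_cursor_offload_pipe() also copies the cached
		 * HUBP/DPP cursor state into the slot at this point */
		p->pipe_mask |= 1u << pipe_idx;
	}

	static void commit_update_sketch(struct offload_stream_sketch *s)
	{
		uint32_t idx = s->write_idx + 1;
		struct payload_sketch *p = &s->payloads[idx % NUM_SLOTS_SKETCH];

		p->write_idx_finish = idx;	/* matching start/finish presumably marks the slot consistent */
		s->write_idx = idx;		/* publish last; also mirrored into the DMUB shared state */
	}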
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0b1d6f608edd..1ff41dba556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
void dcn35_hardware_release(struct dc *dc);
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index f2f16a0bdb4f..5a66c9db2670 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7c276c319086..f02edc9371b0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -26,9 +26,11 @@
#include "clk_mgr.h"
#include "dsc.h"
#include "link_service.h"
+#include "custom_float.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
@@ -36,6 +38,7 @@
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
+#include "../hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -200,6 +203,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -1404,9 +1410,9 @@ void dcn401_prepare_bandwidth(struct dc *dc,
}
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, false);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
@@ -1425,9 +1431,9 @@ void dcn401_optimize_bandwidth(
/* enable fams2 if needed */
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
/* program dchubbub watermarks */
@@ -1466,14 +1472,17 @@ void dcn401_optimize_bandwidth(
}
}
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
/* use always for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
- if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
+ if (!dc->ctx || !dc->ctx->dmub_srv)
+ return;
+
+ if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
return;
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1483,12 +1492,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
{
- struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
- bool lock = params->fams2_global_control_lock_fast_params.lock;
+ struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
+ bool lock = params->dmub_hw_control_lock_fast_params.lock;
- if (params->fams2_global_control_lock_fast_params.is_required) {
+ if (params->dmub_hw_control_lock_fast_params.is_required) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1595,6 +1604,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
+static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ struct pipe_ctx *old_opp_heads[MAX_PIPES];
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+ int i;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ old_otg_master = NULL;
+ }
+
+ /* Process new DSC configuration if DSC is enabled */
+ if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
+ struct dc_stream_state *stream = otg_master->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int last_dsc_calc = 0;
+ bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
+
+ /* Count ODM pipes */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
+
+ /* Step 1: Set DTO DSCCLK for main DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ otg_master->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Step 2: Calculate and set DSC config for main DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
+
+ /* Step 3: Enable main DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, otg_master);
+
+ /* Step 4: Configure and enable ODM DSC blocks */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (!odm_pipe->stream_res.dsc)
+ continue;
+
+ /* Set DTO DSCCLK for ODM DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ odm_pipe->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Calculate and set DSC config for ODM DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
+
+ /* Enable ODM DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
+ }
+
+ /* Step 5: Configure DSC in timing generator */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
+ &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
+ } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
+ /* Disable DSC in OPTC */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
+
+ hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
+ }
+
+ /* Disable DSC for old pipes that no longer need it */
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
+ for (i = 0; i < old_opp_head_count; i++) {
+ old_pipe = old_opp_heads[i];
+ new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
+
+ /* If old pipe had DSC but new pipe doesn't, disable the old DSC */
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
+ /* Then disconnect DSC block */
+ hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
+ }
+ }
+ }
+}
+
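/*
 * Worked example for the slice split above: with ODM 2:1 (opp_cnt == 2) and a
 * stream configured for 8 horizontal DSC slices, each DSC instance is
 * programmed with 8 / 2 = 4 slices; with ODM 4:1 it would be 8 / 4 = 2.
 */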
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_inst[MAX_PIPES] = {0};
+ int opp_head_count;
+ int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
+ int i;
+
+ opp_head_count = resource_get_opp_heads_for_otg_master(
+ otg_master, &context->res_ctx, opp_heads);
+
+ for (i = 0; i < opp_head_count; i++)
+ opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
+
+ /* Add ODM combine/bypass operation to sequence */
+ if (opp_head_count > 1) {
+ hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
+ opp_head_count, odm_slice_width, last_odm_slice_width);
+ } else {
+ hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
+ }
+
+ /* Add OPP operations to sequence */
+ for (i = 0; i < opp_head_count; i++) {
+ /* Add OPP pipe clock control operation */
+ hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
+
+ /* Add OPP program left edge extra pixel operation */
+ hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
+ opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
+ }
+
+ /* Add DSC update operations to sequence */
+ dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
+
+ /* Add blank pixel data operation if needed */
+ if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(
+ dc, otg_master, true, seq_state);
+ }
+}
+
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -2083,6 +2229,157 @@ void dcn401_program_pipe(
}
}
+/*
+ * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
+ *
+ * Instead of calling hardware programming functions directly, this function
+ * appends sequence steps to the provided block_sequence array, which can later
+ * be executed as part of hwss_execute_sequence.
+ */
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
+ !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
+ seq_state);
+ }
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
+ /* Step 1: Program global sync */
+ hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ /* Step 2: Wait for VACTIVE state (if not phantom pipe) */
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ /* Step 3: Set VTG params */
+ hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ /* Step 4: Setup vupdate interrupt (if available) */
+ if (hws->funcs.setup_vupdate_interrupt)
+ dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm) {
+ if (hws->funcs.update_odm_sequence)
+ hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (dc->hwss.enable_plane_sequence)
+ dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size) {
+ hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments) {
+ hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw)) {
+
+ if (dc->hwss.update_dchubp_dpp_sequence)
+ dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+ hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable)) {
+
+ hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+ }
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+	 * so only do gamma programming when powering on; the internal
+	 * memcmp avoids updating on slave planes
+	 */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf) {
+ hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+ }
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
+
+ hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+
+ hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_opp,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ (struct tg_color){0},
+ false,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+
+}
+
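/*
 * A minimal sketch of the record-then-execute pattern used by the *_sequence
 * functions above (the types and helpers below are illustrative assumptions,
 * not the dc block_sequence API): steps are appended while the state is
 * inspected, then run in order in a separate execute pass.
 */
struct sketch_step {
	void (*execute)(void *arg);
	void *arg;
};

struct sketch_sequence {
	struct sketch_step steps[32];
	unsigned int num_steps;
};

static void sketch_append_step(struct sketch_sequence *seq,
			       void (*execute)(void *arg), void *arg)
{
	/* record the step; nothing is programmed yet */
	if (seq->num_steps < 32) {
		seq->steps[seq->num_steps].execute = execute;
		seq->steps[seq->num_steps].arg = arg;
		seq->num_steps++;
	}
}

static void sketch_execute_sequence(struct sketch_sequence *seq)
{
	unsigned int i;

	/* replay all recorded steps in order, then reset */
	for (i = 0; i < seq->num_steps; i++)
		seq->steps[i].execute(seq->steps[i].arg);
	seq->num_steps = 0;
}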
void dcn401_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
@@ -2160,7 +2457,6 @@ void dcn401_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2239,11 +2535,11 @@ void dcn401_program_front_end_for_ctx(
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
- !pipe->top_pipe &&
- pipe->stream &&
- pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
- dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
@@ -2355,7 +2651,6 @@ void dcn401_post_unlock_program_front_end(
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
-
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
@@ -2669,3 +2964,1082 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
+
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+ p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
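/*
 * Illustrative sketch of the payload selection above (the struct below is an
 * assumption, not the dmub_cursor_offload_v1 layout): each stream keeps a
 * small ring of payload slots, write_idx + 1 selects the next slot, and the
 * per-payload pipe mask accumulates the pipes that contributed data.  The
 * updated write_idx itself is assumed to be published by a separate commit
 * step.
 */
#define SKETCH_NUM_PAYLOADS 4

struct sketch_offload_stream {
	unsigned int write_idx;                      /* last published payload */
	unsigned int pipe_mask[SKETCH_NUM_PAYLOADS]; /* pipes written per slot */
};

static unsigned int sketch_select_payload(struct sketch_offload_stream *s,
					  unsigned int pipe_idx)
{
	unsigned int write_idx = s->write_idx + 1; /* new payload (+1) */
	unsigned int payload_idx = write_idx % SKETCH_NUM_PAYLOADS;

	/* mark this pipe's registers as present in the new payload */
	s->pipe_mask[payload_idx] |= 1u << pipe_idx;
	return payload_idx;
}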
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Check and set DC_IP_REQUEST_CNTL if needed */
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+ }
+
+ /* DPP power gating control */
+ hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
+
+ /* HUBP power gating control */
+ hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
+
+ /* HUBP reset */
+ hwss_add_hubp_reset(seq_state, hubp);
+
+ /* DPP reset */
+ hwss_add_dpp_reset(seq_state, dpp);
+
+ /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
+
+ /* DPP root clock control */
+ hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
+}
+
+/* Trigger HW to start disconnecting the plane from the stream on the next vsync, using the block sequence */
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+
+	/* Already reset */
+ if (mpcc_to_remove == NULL)
+ return;
+
+ /* Step 1: Remove MPCC from MPC tree */
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
+
+ // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
+ // so don't wait for MPCC_IDLE in the programming sequence
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
+ /* Step 2: Set MPCC disconnect pending flag */
+ hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
+ }
+
+ /* Step 3: Set optimized required flag */
+ hwss_add_dc_set_optimized_required(seq_state, dc, true);
+
+ /* Step 4: Disconnect HUBP if function exists */
+ if (hubp->funcs->hubp_disconnect)
+ hwss_add_hubp_disconnect(seq_state, hubp);
+
+ /* Step 5: Verify pstate change high if debug sanity checks are enabled */
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ struct rect odm_slice_src;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (blank) {
+ /* Set ABM immediate disable */
+ hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ /* Set display pattern generator for all ODM pipes */
+ while (odm_pipe->next_odm_pipe) {
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ /* Set display pattern generator for final ODM pipe */
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ /* Handle ABM level setting when not blanking */
+ if (!blank) {
+ if (stream_res->abm) {
+ /* Set pipe for ABM */
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ /* Set ABM level */
+ hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
+ }
+ }
+}
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ int i_wb, i_pipe;
+
+ if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
+ return;
+
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+ /* Get direct pointer to writeback info */
+ struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
+ int mpcc_inst = -1;
+
+ if (wb_info->wb_enabled) {
+ /* Get the MPCC instance for writeback_source_plane */
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
+ mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+
+ if (mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ continue;
+ }
+
+ ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* Writeback pipe already enabled, only need to update */
+ dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ }
+ }
+}
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update DWBC with new parameters */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Configure MCIF_WB buffer settings */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+
+ /* Configure MCIF_WB arbitration */
+ hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+
+ /* Enable MCIF_WB */
+ hwss_add_mcif_wb_enable(seq_state, mcif_wb);
+
+ /* Set DWB MUX to connect writeback to MPCC */
+ hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
+
+ /* Enable DWBC */
+ hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
+}
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Disable DWBC */
+ hwss_add_dwbc_disable(seq_state, dwb);
+
+ /* Disable DWB MUX */
+ hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
+
+ /* Disable MCIF_WB */
+ hwss_add_mcif_wb_disable(seq_state, mcif_wb);
+}
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update writeback pipe */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Update MCIF_WB buffer settings if needed */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+}
+
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+		/* return if a group is already assigned; GSL set up for a
+		 * vsync flip is unassigned again, so it can't be "left over"
+		 */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
+ hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+}
+
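/*
 * Illustrative sketch of the GSL group lifecycle above (a stand-alone pool,
 * not dc->res_pool->gsl_groups): group ids 1..3 are handed out while their
 * in-use flag is clear and returned on disable; 0 means "no group free".
 */
struct sketch_gsl_pool {
	unsigned int in_use[3]; /* gsl_0 .. gsl_2 */
};

static int sketch_gsl_acquire(struct sketch_gsl_pool *pool)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!pool->in_use[i]) {
			pool->in_use[i] = 1;
			return i + 1; /* group ids are 1-based */
		}
	}
	return 0; /* all groups busy */
}

static void sketch_gsl_release(struct sketch_gsl_pool *pool, int group_idx)
{
	if (group_idx >= 1 && group_idx <= 3)
		pool->in_use[group_idx - 1] = 0;
}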
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ /* Wait for MPCC disconnect */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
+
+	/* In the flip-immediate with pipe-splitting case, GSL is used for
+	 * synchronization, so we must disable it when the plane is disabled.
+	 */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+
+ /* Update HUBP mall sel */
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
+ hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
+
+ /* Set flip control GSL */
+ hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* HUBP clock control */
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* DPP clock control */
+ hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
+
+ /* Plane atomic power down */
+ if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
+ dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp, seq_state);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled */
+ if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
+ hwss_add_disable_phantom_crtc(seq_state, tg);
+}
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state)
+{
+ struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+	/* Wait for all DPP pipes in the current mpc blending tree to complete
+	 * double buffered disconnection before resetting OPP
+	 */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+ if (dsc) {
+ bool *is_ungated = NULL;
+ /* Check DSC power gate status */
+ if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+ hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+		/* Seamless-update specific: postpone the non double buffered
+		 * DSCCLK disable logic to the post unlock sequence, after DSC
+		 * is disconnected from OPP but not yet power gated.
+		 */
+
+ /* DSC wait disconnect pending clear */
+ hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+ /* DSC disable */
+ hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+ /* Set reference DSCCLK */
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+ }
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
+}
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
+ return;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+
+ /* Step 1: DPP root clock control - enable clock */
+ if (hws->funcs.dpp_root_clock_control)
+ hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 2: Enable DC IP request (if needed) */
+ if (hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+
+ /* Step 3: DPP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
+ hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 4: HUBP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
+ hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ /* Step 5: Disable DC IP request (restore state) */
+ if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ /* Step 6: HUBP clock control - enable DCFCLK */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
+
+ /* Step 7: HUBP initialization */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
+ hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
+
+ /* Step 8: OPP pipe clock control - enable */
+ if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
+ hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
+
+ /* Step 9: VM system aperture settings */
+ if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
+ hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
+ dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
+ }
+
+ /* Step 10: Flip interrupt setup */
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
+ hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
+ }
+}
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
+
+ if (!hubp || !dpp || !plane_state)
+ return;
+
+ /* Step 1: DPP DPPCLK control */
+ if (pipe_ctx->update_flags.bits.dppclk)
+ hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
+
+ /* Step 2: DCCG update DPP DTO */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* Step 3: HUBP VTG selection */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
+
+ /* Step 4: HUBP setup (choose setup2 or setup) */
+ if (hubp->funcs->hubp_setup2) {
+ hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync, &pipe_ctx->stream->timing);
+ } else if (hubp->funcs->hubp_setup) {
+ hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
+ }
+ }
+
+ /* Step 5: Set unbounded requesting */
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
+
+ /* Step 6: HUBP interdependent setup */
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2)
+ hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
+ else if (hubp->funcs->hubp_setup_interdependent)
+ hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
+ }
+
+ /* Step 7: DPP setup - input CSC and format setup */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
+
+ /* Step 8: DPP cursor matrix setup */
+ if (dpp->funcs->set_cursor_matrix) {
+ hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
+ &plane_state->cursor_csc_color_matrix);
+ }
+
+ /* Step 9: DPP program bias and scale */
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
+ }
+
+ /* Step 10: MPCC updates */
+ if (pipe_ctx->update_flags.bits.mpcc ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change) {
+
+ /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
+ if (hws->funcs.update_mpcc_sequence)
+ hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ /* Step 11: DPP scaler setup */
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ /* Step 12: HUBP viewport programming */
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+ hwss_add_hubp_mem_program_viewport(seq_state, hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Step 13: HUBP program mcache if available */
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
+
+ /* Step 14: Cursor attribute setup */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+
+ hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
+
+ /* Step 15: Cursor position setup */
+ hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
+
+ /* Step 16: Cursor SDR white level */
+ if (dc->hwss.set_cursor_sdr_white_level)
+ hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 17: Gamut remap and output CSC */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.gamut_remap ||
+ plane_state->update_flags.bits.gamut_remap_change ||
+ pipe_ctx->stream->update_flags.bits.out_csc) {
+
+ /* Gamut remap */
+ hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
+
+ /* Output CSC */
+ hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
+ }
+
+ /* Step 18: HUBP surface configuration */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hwss_add_hubp_program_surface_config(seq_state, hubp,
+ plane_state->format, &plane_state->tiling_info, size,
+ plane_state->rotation, &plane_state->dcc,
+ plane_state->horizontal_mirror, 0);
+ hubp->power_gated = false;
+ }
+
+ /* Step 19: Update plane address (with SubVP support) */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update) {
+
+ /* SubVP save surface address if needed */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
+ hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
+ &pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
+ }
+
+ /* Update plane address */
+ hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 20: HUBP set blank - enable plane */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_hubp_set_blank(seq_state, hubp, false);
+
+ /* Step 21: Phantom HUBP post enable */
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
+ hwss_add_phantom_hubp_post_enable(seq_state, hubp);
+}
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (!hubp || !pipe_ctx->plane_state)
+ return;
+
+ per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+
+ /* Initialize blend configuration */
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+ /* MPCC instance is equal to HUBP instance */
+ mpcc_id = hubp->inst;
+
+ /* Step 1: Update blending if no full update needed */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+
+ /* Update blending configuration */
+ hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
+
+ /* Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* Step 2: Get existing MPCC for DPP */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+
+ /* Step 3: Remove MPCC if being used */
+ if (new_mpcc != NULL) {
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
+ } else {
+ /* Step 4: Assert MPCC idle (debug only) */
+ if (dc->debug.sanity_checks)
+ hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
+ }
+
+ /* Step 5: Insert new plane into MPC tree */
+ hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
+
+ /* Step 6: Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+
+ /* Step 7: Set HUBP OPP and MPCC IDs */
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ int mpcc_inst;
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
+ }
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ if (hubp)
+ hwss_add_hubp_set_blank(seq_state, hubp, true);
+ }
+ }
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
+}
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
+}
+
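/*
 * Worked example for the packing above: with fmt = { .exponenta_bits = 6,
 * .mantissa_bits = 12, .sign = true } the exponent bias is 2^(6-1) - 1 = 31,
 * so 1.0 packs as sign = 0, exponent = 31, mantissa = 0, i.e. 31 << 12 =
 * 0x1f000, the default hw_mult above.  A minimal sketch of the field layout,
 * assuming [sign][exponent][mantissa] ordering:
 */
static unsigned int sketch_pack_custom_float(unsigned int sign,
					     unsigned int exponent,
					     unsigned int mantissa)
{
	const unsigned int mantissa_bits = 12;
	const unsigned int exponent_bits = 6;

	return (sign << (exponent_bits + mantissa_bits)) |
	       ((exponent & ((1u << exponent_bits) - 1)) << mantissa_bits) |
	       (mantissa & ((1u << mantissa_bits) - 1));
}
/* sketch_pack_custom_float(0, 31, 0) == 0x1f000 (1.0) */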
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ int i;
+ unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
+ bool cache_cursor = false;
+
+ // Don't force p-state disallow -- can't block dummy p-state
+
+ // Update MALL_SEL register for each pipe (break down update_mall_sel call)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
+ cache_cursor = true;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
+ } else {
+ // MALL not supported with Stereo3D
+ uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface) ? 2 : 0;
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
+ }
+ }
+ }
+
+ // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
+ hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
+ }
+ }
+}
+
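/*
 * Worked example for the cursor sizing above: cursor_size starts as
 * pitch * height and is then scaled per cursor format (halved for mono,
 * x4 for the 32-bit color formats, x8 for the 64-bit FP formats).
 * Assuming a 32-bit color cursor:
 *
 *    64 x  64 ->  64 *  64 * 4 =  16384 bytes -> not > 16384, no MALL caching
 *   128 x 128 -> 128 * 128 * 4 =  65536 bytes -> cached in MALL
 *   256 x 256 -> 256 * 256 * 4 = 262144 bytes -> cached in MALL
 */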
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high)
+ return;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ /* Attempt hardware workaround force recovery */
+ dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
+ }
+}
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp;
+ unsigned int i;
+
+ if (!dc->debug.recovery_enabled)
+ return false;
+
+ /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
+
+ /* Step 3: Set HUBP_DISABLE=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 4: Set HUBP_DISABLE=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, false);
+ }
+ }
+
+ /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
+
+ /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, false);
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 2621b7725267..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -9,6 +9,7 @@
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
+#include "hwss/hw_sequencer.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
@@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
@@ -97,6 +100,11 @@ void dcn401_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
@@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
void dcn401_initialize_min_clocks(struct dc *dc);
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable);
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index d6e11b7e4fce..162096ce0bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -9,6 +9,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
@@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_plane_sequence = dcn401_disable_plane_sequence,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
@@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -95,55 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- .fams2_global_control_lock = dcn401_fams2_global_control_lock,
+ .dmub_hw_control_lock = dcn401_dmub_hw_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
- .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
.detect_pipe_changes = dcn401_detect_pipe_changes,
.enable_plane = dcn20_enable_plane,
+ .enable_plane_sequence = dcn401_enable_plane_sequence,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence,
.get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence,
.update_mpcc = dcn20_update_mpcc,
+ .update_mpcc_sequence = dcn401_update_mpcc_sequence,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
+ .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence,
.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn401_plane_atomic_power_down,
+ .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence,
.update_odm = dcn401_update_odm,
+ .update_odm_sequence = dcn401_update_odm_sequence,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
+ .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
.perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
+ .program_pipe_sequence = dcn401_program_pipe_sequence,
+ .dc_ip_request_cntl = dcn401_dc_ip_request_cntl,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
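[Editorial note, not part of the patch] The dcn401 hunk above only swaps function pointers; the shape of the new *_sequence hooks is defined by the hw_sequencer.h changes that follow. As a rough illustration of the intent, a sequence-style hook is expected to append steps to a caller-owned block_sequence_state instead of programming registers on the spot, using the hwss_add_* helpers declared below. Everything in this sketch beyond the types and helpers visible in this diff (the function name, the multiplier value, the plane_res.dpp access path) is assumed for illustration only:

    /* Illustrative sketch; assumes the DC headers touched in this diff. */
    static void example_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
    		struct block_sequence_state *seq_state)
    {
    	uint32_t hw_mult = 0x1f000;	/* hypothetical multiplier value */

    	/* Queue the DPP programming step instead of writing registers here. */
    	hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
    }
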
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 1723bbcf2c46..3772b4aa11cc 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -31,6 +31,8 @@
#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "inc/core_status.h"
+#include "inc/hw/hw_shared.h"
+#include "dsc/dsc.h"
struct pipe_ctx;
struct dc_state;
@@ -48,6 +50,8 @@ struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
struct dc_underflow_debug_data;
+struct dsc_optc_config;
+struct vm_system_aperture_param;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -62,7 +66,7 @@ struct pipe_control_lock_params {
};
struct set_flip_control_gsl_params {
- struct pipe_ctx *pipe_ctx;
+ struct hubp *hubp;
bool flip_immediate;
};
@@ -148,12 +152,582 @@ struct wait_for_dcc_meta_propagation_params {
const struct pipe_ctx *top_pipe_to_program;
};
-struct fams2_global_control_lock_fast_params {
+struct dmub_hw_control_lock_fast_params {
struct dc *dc;
bool is_required;
bool lock;
};
+struct program_surface_config_params {
+ struct hubp *hubp;
+ enum surface_pixel_format format;
+ struct dc_tiling_info *tiling_info;
+ struct plane_size plane_size;
+ enum dc_rotation_angle rotation;
+ struct dc_plane_dcc_param *dcc;
+ bool horizontal_mirror;
+ int compat_level;
+};
+
+struct program_mcache_id_and_split_coordinate {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct program_cursor_update_now_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct hubp_wait_pipe_read_start_params {
+ struct hubp *hubp;
+};
+
+struct apply_update_flags_for_phantom_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct update_phantom_vp_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct set_odm_combine_params {
+ struct timing_generator *tg;
+ int opp_inst[MAX_PIPES];
+ int opp_head_count;
+ int odm_slice_width;
+ int last_odm_slice_width;
+};
+
+struct set_odm_bypass_params {
+ struct timing_generator *tg;
+ const struct dc_crtc_timing *timing;
+};
+
+struct opp_pipe_clock_control_params {
+ struct output_pixel_processor *opp;
+ bool enable;
+};
+
+struct opp_program_left_edge_extra_pixel_params {
+ struct output_pixel_processor *opp;
+ enum dc_pixel_encoding pixel_encoding;
+ bool is_otg_master;
+};
+
+struct dccg_set_dto_dscclk_params {
+ struct dccg *dccg;
+ int inst;
+ int num_slices_h;
+};
+
+struct dsc_set_config_params {
+ struct display_stream_compressor *dsc;
+ struct dsc_config *dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg;
+};
+
+struct dsc_enable_params {
+ struct display_stream_compressor *dsc;
+ int opp_inst;
+};
+
+struct tg_set_dsc_config_params {
+ struct timing_generator *tg;
+ struct dsc_optc_config *dsc_optc_cfg;
+ bool enable;
+};
+
+struct dsc_disconnect_params {
+ struct display_stream_compressor *dsc;
+};
+
+struct dsc_read_state_params {
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state *dsc_state;
+};
+
+struct dsc_calculate_and_set_config_params {
+ struct pipe_ctx *pipe_ctx;
+ struct dsc_optc_config dsc_optc_cfg;
+ bool enable;
+ int opp_cnt;
+};
+
+struct dsc_enable_with_opp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_tg_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct tg_program_global_sync_params {
+ struct timing_generator *tg;
+ int vready_offset;
+ unsigned int vstartup_lines;
+ unsigned int vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines;
+};
+
+struct tg_wait_for_state_params {
+ struct timing_generator *tg;
+ enum crtc_state state;
+};
+
+struct tg_set_vtg_params_params {
+ struct timing_generator *tg;
+ struct dc_crtc_timing *timing;
+ bool program_fp2;
+};
+
+struct tg_set_gsl_params {
+ struct timing_generator *tg;
+ struct gsl_params gsl;
+};
+
+struct tg_set_gsl_source_select_params {
+ struct timing_generator *tg;
+ int group_idx;
+ uint32_t gsl_ready_signal;
+};
+
+struct setup_vupdate_interrupt_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct tg_setup_vertical_interrupt2_params {
+ struct timing_generator *tg;
+ int start_line;
+};
+
+struct dpp_set_hdr_multiplier_params {
+ struct dpp *dpp;
+ uint32_t hw_mult;
+};
+
+struct program_det_size_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_buffer_size_kb;
+};
+
+struct program_det_segments_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_size;
+};
+
+struct update_dchubp_dpp_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct opp_set_dyn_expansion_params {
+ struct output_pixel_processor *opp;
+ enum dc_color_space color_space;
+ enum dc_color_depth color_depth;
+ enum signal_type signal;
+};
+
+struct opp_program_fmt_params {
+ struct output_pixel_processor *opp;
+ struct bit_depth_reduction_params *fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping;
+};
+
+struct opp_program_bit_depth_reduction_params {
+ struct output_pixel_processor *opp;
+ bool use_default_params;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct opp_set_disp_pattern_generator_params {
+ struct output_pixel_processor *opp;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ struct tg_color solid_color;
+ bool use_solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct set_abm_pipe_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_abm_level_params {
+ struct abm *abm;
+ unsigned int abm_level;
+};
+
+struct set_abm_immediate_disable_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_disp_pattern_generator_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ const struct tg_color *solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct mpc_update_blending_params {
+ struct mpc *mpc;
+ struct mpcc_blnd_cfg blnd_cfg;
+ int mpcc_id;
+};
+
+struct mpc_assert_idle_mpcc_params {
+ struct mpc *mpc;
+ int mpcc_id;
+};
+
+struct mpc_insert_plane_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg;
+ struct mpcc *insert_above_mpcc;
+ int dpp_id;
+ int mpcc_id;
+};
+
+struct mpc_remove_mpcc_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove;
+};
+
+struct opp_set_mpcc_disconnect_pending_params {
+ struct output_pixel_processor *opp;
+ int mpcc_inst;
+ bool pending;
+};
+
+struct dc_set_optimized_required_params {
+ struct dc *dc;
+ bool optimized_required;
+};
+
+struct hubp_disconnect_params {
+ struct hubp *hubp;
+};
+
+struct hubbub_force_pstate_change_control_params {
+ struct hubbub *hubbub;
+ bool enable;
+ bool wait;
+};
+
+struct tg_enable_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct hubp_wait_flip_pending_params {
+ struct hubp *hubp;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct tg_wait_double_buffer_pending_params {
+ struct timing_generator *tg;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct update_force_pstate_params {
+ struct dc *dc;
+ struct dc_state *context;
+};
+
+struct hubbub_apply_dedcn21_147_wa_params {
+ struct hubbub *hubbub;
+};
+
+struct hubbub_allow_self_refresh_control_params {
+ struct hubbub *hubbub;
+ bool allow;
+ bool *disallow_self_refresh_applied;
+};
+
+struct tg_get_frame_count_params {
+ struct timing_generator *tg;
+ unsigned int *frame_count;
+};
+
+struct mpc_set_dwb_mux_params {
+ struct mpc *mpc;
+ int dwb_id;
+ int mpcc_id;
+};
+
+struct mpc_disable_dwb_mux_params {
+ struct mpc *mpc;
+ unsigned int dwb_id;
+};
+
+struct mcif_wb_config_buf_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+ unsigned int dest_height;
+};
+
+struct mcif_wb_config_arb_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_arb_params *mcif_arb_params;
+};
+
+struct mcif_wb_enable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct mcif_wb_disable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct dwbc_enable_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct dwbc_disable_params {
+ struct dwbc *dwb;
+};
+
+struct dwbc_update_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct hubp_update_mall_sel_params {
+ struct hubp *hubp;
+ uint32_t mall_sel;
+ bool cache_cursor;
+};
+
+struct hubp_prepare_subvp_buffering_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_set_blank_en_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_disable_control_params {
+ struct hubp *hubp;
+ bool disable;
+};
+
+struct hubbub_soft_reset_params {
+ struct hubbub *hubbub;
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset);
+ bool reset;
+};
+
+struct hubp_clk_cntl_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_init_params {
+ struct hubp *hubp;
+};
+
+struct hubp_set_vm_system_aperture_settings_params {
+ struct hubp *hubp;
+ //struct vm_system_aperture_param apt;
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct hubp_set_flip_int_params {
+ struct hubp *hubp;
+};
+
+struct dpp_dppclk_control_params {
+ struct dpp *dpp;
+ bool dppclk_div;
+ bool enable;
+};
+
+struct disable_phantom_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct dpp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool power_on;
+};
+
+struct hubp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int hubp_inst;
+ bool power_on;
+};
+
+struct hubp_reset_params {
+ struct hubp *hubp;
+};
+
+struct dpp_reset_params {
+ struct dpp *dpp;
+};
+
+struct dpp_root_clock_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool clock_on;
+};
+
+struct dc_ip_request_cntl_params {
+ struct dc *dc;
+ bool enable;
+};
+
+struct dsc_pg_status_params {
+ struct dce_hwseq *hws;
+ int dsc_inst;
+ bool is_ungated;
+};
+
+struct dsc_wait_disconnect_pending_clear_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dsc_disable_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dccg_set_ref_dscclk_params {
+ struct dccg *dccg;
+ int dsc_inst;
+ bool *is_ungated;
+};
+
+struct dccg_update_dpp_dto_params {
+ struct dccg *dccg;
+ int dpp_inst;
+ int dppclk_khz;
+};
+
+struct hubp_vtg_sel_params {
+ struct hubp *hubp;
+ uint32_t otg_inst;
+};
+
+struct hubp_setup2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+ union dml2_global_sync_programming *global_sync;
+ struct dc_crtc_timing *timing;
+};
+
+struct hubp_setup_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest;
+};
+
+struct hubp_set_unbounded_requesting_params {
+ struct hubp *hubp;
+ bool unbounded_req;
+};
+
+struct hubp_setup_interdependent2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+};
+
+struct hubp_setup_interdependent_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+};
+
+struct dpp_set_cursor_matrix_params {
+ struct dpp *dpp;
+ enum dc_color_space color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix;
+};
+
+struct mpc_update_mpcc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct dpp_set_scaler_params {
+ struct dpp *dpp;
+ const struct scaler_data *scl_data;
+};
+
+struct hubp_mem_program_viewport_params {
+ struct hubp *hubp;
+ const struct rect *viewport;
+ const struct rect *viewport_c;
+};
+
+struct hubp_program_mcache_id_and_split_coordinate_params {
+ struct hubp *hubp;
+ struct mcache_regs_struct *mcache_regs;
+};
+
+struct set_cursor_attribute_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_sdr_white_level_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_output_csc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum dc_color_space colorspace;
+ uint16_t *matrix;
+ int opp_id;
+};
+
+struct hubp_set_blank_params {
+ struct hubp *hubp;
+ bool blank;
+};
+
+struct phantom_hubp_post_enable_params {
+ struct hubp *hubp;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -173,7 +747,107 @@ union block_sequence_params {
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
- struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
+ struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params;
+ struct program_surface_config_params program_surface_config_params;
+ struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate;
+ struct program_cursor_update_now_params program_cursor_update_now_params;
+ struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params;
+ struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params;
+ struct update_phantom_vp_position_params update_phantom_vp_position_params;
+ struct set_odm_combine_params set_odm_combine_params;
+ struct set_odm_bypass_params set_odm_bypass_params;
+ struct opp_pipe_clock_control_params opp_pipe_clock_control_params;
+ struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params;
+ struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params;
+ struct dsc_set_config_params dsc_set_config_params;
+ struct dsc_enable_params dsc_enable_params;
+ struct tg_set_dsc_config_params tg_set_dsc_config_params;
+ struct dsc_disconnect_params dsc_disconnect_params;
+ struct dsc_read_state_params dsc_read_state_params;
+ struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params;
+ struct dsc_enable_with_opp_params dsc_enable_with_opp_params;
+ struct program_tg_params program_tg_params;
+ struct tg_program_global_sync_params tg_program_global_sync_params;
+ struct tg_wait_for_state_params tg_wait_for_state_params;
+ struct tg_set_vtg_params_params tg_set_vtg_params_params;
+ struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params;
+ struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params;
+ struct tg_set_gsl_params tg_set_gsl_params;
+ struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params;
+ struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params;
+ struct program_det_size_params program_det_size_params;
+ struct program_det_segments_params program_det_segments_params;
+ struct update_dchubp_dpp_params update_dchubp_dpp_params;
+ struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params;
+ struct opp_program_fmt_params opp_program_fmt_params;
+ struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params;
+ struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params;
+ struct set_abm_pipe_params set_abm_pipe_params;
+ struct set_abm_level_params set_abm_level_params;
+ struct set_abm_immediate_disable_params set_abm_immediate_disable_params;
+ struct set_disp_pattern_generator_params set_disp_pattern_generator_params;
+ struct mpc_remove_mpcc_params mpc_remove_mpcc_params;
+ struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params;
+ struct dc_set_optimized_required_params dc_set_optimized_required_params;
+ struct hubp_disconnect_params hubp_disconnect_params;
+ struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params;
+ struct tg_enable_crtc_params tg_enable_crtc_params;
+ struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params;
+ struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params;
+ struct update_force_pstate_params update_force_pstate_params;
+ struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params;
+ struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params;
+ struct tg_get_frame_count_params tg_get_frame_count_params;
+ struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params;
+ struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params;
+ struct mcif_wb_config_buf_params mcif_wb_config_buf_params;
+ struct mcif_wb_config_arb_params mcif_wb_config_arb_params;
+ struct mcif_wb_enable_params mcif_wb_enable_params;
+ struct mcif_wb_disable_params mcif_wb_disable_params;
+ struct dwbc_enable_params dwbc_enable_params;
+ struct dwbc_disable_params dwbc_disable_params;
+ struct dwbc_update_params dwbc_update_params;
+ struct hubp_update_mall_sel_params hubp_update_mall_sel_params;
+ struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params;
+ struct hubp_set_blank_en_params hubp_set_blank_en_params;
+ struct hubp_disable_control_params hubp_disable_control_params;
+ struct hubbub_soft_reset_params hubbub_soft_reset_params;
+ struct hubp_clk_cntl_params hubp_clk_cntl_params;
+ struct hubp_init_params hubp_init_params;
+ struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params;
+ struct hubp_set_flip_int_params hubp_set_flip_int_params;
+ struct dpp_dppclk_control_params dpp_dppclk_control_params;
+ struct disable_phantom_crtc_params disable_phantom_crtc_params;
+ struct dpp_pg_control_params dpp_pg_control_params;
+ struct hubp_pg_control_params hubp_pg_control_params;
+ struct hubp_reset_params hubp_reset_params;
+ struct dpp_reset_params dpp_reset_params;
+ struct dpp_root_clock_control_params dpp_root_clock_control_params;
+ struct dc_ip_request_cntl_params dc_ip_request_cntl_params;
+ struct dsc_pg_status_params dsc_pg_status_params;
+ struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params;
+ struct dsc_disable_params dsc_disable_params;
+ struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params;
+ struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params;
+ struct hubp_vtg_sel_params hubp_vtg_sel_params;
+ struct hubp_setup2_params hubp_setup2_params;
+ struct hubp_setup_params hubp_setup_params;
+ struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params;
+ struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params;
+ struct hubp_setup_interdependent_params hubp_setup_interdependent_params;
+ struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params;
+ struct mpc_update_mpcc_params mpc_update_mpcc_params;
+ struct mpc_update_blending_params mpc_update_blending_params;
+ struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params;
+ struct mpc_insert_plane_params mpc_insert_plane_params;
+ struct dpp_set_scaler_params dpp_set_scaler_params;
+ struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
+ struct set_cursor_attribute_params set_cursor_attribute_params;
+ struct set_cursor_position_params set_cursor_position_params;
+ struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
+ struct program_output_csc_params program_output_csc_params;
+ struct hubp_set_blank_params hubp_set_blank_params;
+ struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params;
};
enum block_sequence_func {
@@ -189,13 +863,110 @@ enum block_sequence_func {
DPP_SETUP_DPP,
DPP_PROGRAM_BIAS_AND_SCALE,
DPP_SET_OUTPUT_TRANSFER_FUNC,
+ DPP_SET_HDR_MULTIPLIER,
MPC_UPDATE_VISUAL_CONFIRM,
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
- DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ DMUB_HW_CONTROL_LOCK_FAST,
+ HUBP_PROGRAM_SURFACE_CONFIG,
+ HUBP_PROGRAM_MCACHE_ID,
+ PROGRAM_CURSOR_UPDATE_NOW,
+ HUBP_WAIT_PIPE_READ_START,
+ HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM,
+ HWS_UPDATE_PHANTOM_VP_POSITION,
+ OPTC_SET_ODM_COMBINE,
+ OPTC_SET_ODM_BYPASS,
+ OPP_PIPE_CLOCK_CONTROL,
+ OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL,
+ DCCG_SET_DTO_DSCCLK,
+ DSC_SET_CONFIG,
+ DSC_ENABLE,
+ TG_SET_DSC_CONFIG,
+ DSC_DISCONNECT,
+ DSC_READ_STATE,
+ DSC_CALCULATE_AND_SET_CONFIG,
+ DSC_ENABLE_WITH_OPP,
+ TG_PROGRAM_GLOBAL_SYNC,
+ TG_WAIT_FOR_STATE,
+ TG_SET_VTG_PARAMS,
+ TG_SETUP_VERTICAL_INTERRUPT2,
+ HUBP_PROGRAM_DET_SIZE,
+ HUBP_PROGRAM_DET_SEGMENTS,
+ OPP_SET_DYN_EXPANSION,
+ OPP_PROGRAM_FMT,
+ OPP_PROGRAM_BIT_DEPTH_REDUCTION,
+ OPP_SET_DISP_PATTERN_GENERATOR,
+ ABM_SET_PIPE,
+ ABM_SET_LEVEL,
+ ABM_SET_IMMEDIATE_DISABLE,
+ MPC_REMOVE_MPCC,
+ OPP_SET_MPCC_DISCONNECT_PENDING,
+ DC_SET_OPTIMIZED_REQUIRED,
+ HUBP_DISCONNECT,
+ HUBBUB_FORCE_PSTATE_CHANGE_CONTROL,
+ TG_ENABLE_CRTC,
+ TG_SET_GSL,
+ TG_SET_GSL_SOURCE_SELECT,
+ HUBP_WAIT_FLIP_PENDING,
+ TG_WAIT_DOUBLE_BUFFER_PENDING,
+ UPDATE_FORCE_PSTATE,
+ PROGRAM_MALL_PIPE_CONFIG,
+ HUBBUB_APPLY_DEDCN21_147_WA,
+ HUBBUB_ALLOW_SELF_REFRESH_CONTROL,
+ TG_GET_FRAME_COUNT,
+ MPC_SET_DWB_MUX,
+ MPC_DISABLE_DWB_MUX,
+ MCIF_WB_CONFIG_BUF,
+ MCIF_WB_CONFIG_ARB,
+ MCIF_WB_ENABLE,
+ MCIF_WB_DISABLE,
+ DWBC_ENABLE,
+ DWBC_DISABLE,
+ DWBC_UPDATE,
+ HUBP_UPDATE_MALL_SEL,
+ HUBP_PREPARE_SUBVP_BUFFERING,
+ HUBP_SET_BLANK_EN,
+ HUBP_DISABLE_CONTROL,
+ HUBBUB_SOFT_RESET,
+ HUBP_CLK_CNTL,
+ HUBP_INIT,
+ HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS,
+ HUBP_SET_FLIP_INT,
+ DPP_DPPCLK_CONTROL,
+ DISABLE_PHANTOM_CRTC,
+ DSC_PG_STATUS,
+ DSC_WAIT_DISCONNECT_PENDING_CLEAR,
+ DSC_DISABLE,
+ DCCG_SET_REF_DSCCLK,
+ DPP_PG_CONTROL,
+ HUBP_PG_CONTROL,
+ HUBP_RESET,
+ DPP_RESET,
+ DPP_ROOT_CLOCK_CONTROL,
+ DC_IP_REQUEST_CNTL,
+ DCCG_UPDATE_DPP_DTO,
+ HUBP_VTG_SEL,
+ HUBP_SETUP2,
+ HUBP_SETUP,
+ HUBP_SET_UNBOUNDED_REQUESTING,
+ HUBP_SETUP_INTERDEPENDENT2,
+ HUBP_SETUP_INTERDEPENDENT,
+ DPP_SET_CURSOR_MATRIX,
+ MPC_UPDATE_BLENDING,
+ MPC_ASSERT_IDLE_MPCC,
+ MPC_INSERT_PLANE,
+ DPP_SET_SCALER,
+ HUBP_MEM_PROGRAM_VIEWPORT,
+ SET_CURSOR_ATTRIBUTE,
+ SET_CURSOR_POSITION,
+ SET_CURSOR_SDR_WHITE_LEVEL,
+ PROGRAM_OUTPUT_CSC,
+ HUBP_SET_LEGACY_TILING_COMPAT_LEVEL,
+ HUBP_SET_BLANK,
+ PHANTOM_HUBP_POST_ENABLE,
/* This must be the last value in this enum, add new ones above */
HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
@@ -205,6 +976,11 @@ struct block_sequence {
enum block_sequence_func func;
};
+struct block_sequence_state {
+ struct block_sequence *steps;
+ unsigned int *num_steps;
+};
+
#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
struct hw_sequencer_funcs {
@@ -222,6 +998,8 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -239,6 +1017,10 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
@@ -310,6 +1092,13 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+ void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe);
/* Colour Related */
void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
@@ -452,13 +1241,13 @@ struct hw_sequencer_funcs {
const struct dc_state *new_ctx);
void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
- void (*fams2_global_control_lock)(struct dc *dc,
+ void (*dmub_hw_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
void (*fams2_update_config)(struct dc *dc,
struct dc_state *context,
bool enable);
- void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
+ void (*dmub_hw_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
@@ -471,11 +1260,23 @@ struct hw_sequencer_funcs {
void (*enable_plane)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*enable_plane_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_dchubp_dpp)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*update_dchubp_dpp_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*post_unlock_reset_opp_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
void (*get_underflow_debug_data)(const struct dc *dc,
struct timing_generator *tg,
struct dc_underflow_debug_data *out_data);
@@ -588,4 +1389,624 @@ void hwss_set_ocsc_default(union block_sequence_params *params);
void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+void hwss_program_surface_config(union block_sequence_params *params);
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params);
+
+void hwss_set_odm_combine(union block_sequence_params *params);
+
+void hwss_set_odm_bypass(union block_sequence_params *params);
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params);
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params);
+
+void hwss_blank_pixel_data(union block_sequence_params *params);
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params);
+
+void hwss_dsc_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable(union block_sequence_params *params);
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params);
+
+void hwss_dsc_disconnect(union block_sequence_params *params);
+
+void hwss_dsc_read_state(union block_sequence_params *params);
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params);
+
+void hwss_program_tg(union block_sequence_params *params);
+
+void hwss_tg_program_global_sync(union block_sequence_params *params);
+
+void hwss_tg_wait_for_state(union block_sequence_params *params);
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params);
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params);
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params);
+
+void hwss_program_det_size(union block_sequence_params *params);
+
+void hwss_program_det_segments(union block_sequence_params *params);
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params);
+
+void hwss_opp_program_fmt(union block_sequence_params *params);
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params);
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params);
+
+void hwss_set_abm_pipe(union block_sequence_params *params);
+
+void hwss_set_abm_level(union block_sequence_params *params);
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params);
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params);
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params);
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params);
+
+void hwss_hubp_disconnect(union block_sequence_params *params);
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params);
+
+void hwss_tg_enable_crtc(union block_sequence_params *params);
+
+void hwss_tg_set_gsl(union block_sequence_params *params);
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params);
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params);
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params);
+
+void hwss_update_force_pstate(union block_sequence_params *params);
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params);
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params);
+
+void hwss_tg_get_frame_count(union block_sequence_params *params);
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params);
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params);
+
+void hwss_mcif_wb_enable(union block_sequence_params *params);
+
+void hwss_mcif_wb_disable(union block_sequence_params *params);
+
+void hwss_dwbc_enable(union block_sequence_params *params);
+
+void hwss_dwbc_disable(union block_sequence_params *params);
+
+void hwss_dwbc_update(union block_sequence_params *params);
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params);
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params);
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params);
+
+void hwss_hubp_disable_control(union block_sequence_params *params);
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params);
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params);
+
+void hwss_hubp_init(union block_sequence_params *params);
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params);
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params);
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params);
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params);
+
+void hwss_dsc_pg_status(union block_sequence_params *params);
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params);
+
+void hwss_dsc_disable(union block_sequence_params *params);
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params);
+
+void hwss_dpp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_reset(union block_sequence_params *params);
+
+void hwss_dpp_reset(union block_sequence_params *params);
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params);
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params);
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params);
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params);
+
+void hwss_hubp_setup2(union block_sequence_params *params);
+
+void hwss_hubp_setup(union block_sequence_params *params);
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params);
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params);
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_update_blending(union block_sequence_params *params);
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_insert_plane(union block_sequence_params *params);
+
+void hwss_dpp_set_scaler(union block_sequence_params *params);
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
+
+void hwss_set_cursor_attribute(union block_sequence_params *params);
+
+void hwss_set_cursor_position(union block_sequence_params *params);
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params);
+
+void hwss_program_output_csc(union block_sequence_params *params);
+
+void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params);
+
+void hwss_hubp_set_blank(union block_sequence_params *params);
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params);
+
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock);
+
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp, bool flip_immediate);
+
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state);
+
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream);
+
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id);
+
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int mpcc_id, bool power_on);
+
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index);
+
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *top_pipe_to_program);
+
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width);
+
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *optc, struct dc_crtc_timing *timing);
+
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines);
+
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, enum crtc_state state);
+
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
+
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line);
+
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult);
+
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb);
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, bool enable, bool wait);
+
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size);
+
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth, enum signal_type signal);
+
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm, uint32_t abm_level);
+
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h);
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt);
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove);
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending);
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc);
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required);
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset);
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable);
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height);
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params);
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id);
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb);
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl);
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor);
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable);
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset);
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable);
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated);
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated);
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank);
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp);
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable);
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high);
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz);
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst);
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing);
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req);
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs);
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level);
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix);
+
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id);
+
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id);
+
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context);
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub);
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied);
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count);
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dsc_optc_config *dsc_optc_cfg,
+ bool enable);
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master);
+
#endif /* __DC_HW_SEQUENCER_H__ */
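[Editorial note, not part of the patch] The header above only declares the hwss_add_* helpers; their bodies live in hwss/hw_sequencer.c and are not included in this hunk. A minimal sketch of the expected shape, assuming struct block_sequence pairs a params union member with the func tag (the union and enum are declared above; the params field name is an assumption):

    /* Sketch of an assumed helper body; the real implementation may differ. */
    void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
    		struct dpp *dpp, uint32_t hw_mult)
    {
    	unsigned int i = *seq_state->num_steps;

    	if (i >= MAX_HWSS_BLOCK_SEQUENCE_SIZE)
    		return;	/* sequence full: drop the step rather than overflow */

    	seq_state->steps[i].params.dpp_set_hdr_multiplier_params.dpp = dpp;
    	seq_state->steps[i].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
    	seq_state->steps[i].func = DPP_SET_HDR_MULTIPLIER;
    	(*seq_state->num_steps)++;
    }
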
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 1e2d247fbbac..406db231bc72 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -27,6 +27,7 @@
#define __DC_HW_SEQUENCER_PRIVATE_H__
#include "dc_types.h"
+#include "hw_sequencer.h"
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
@@ -80,7 +81,13 @@ struct hwseq_private_funcs {
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect_sequence)(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*set_input_transfer_func)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -97,6 +104,10 @@ struct hwseq_private_funcs {
void (*blank_pixel_data)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank);
+ void (*blank_pixel_data_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -105,6 +116,8 @@ struct hwseq_private_funcs {
bool enable);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_blank)(struct dc *dc, struct timing_generator *tg);
void (*disable_vga)(struct dce_hwseq *hws);
@@ -112,6 +125,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_power_down)(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+ void (*plane_atomic_power_down_sequence)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
@@ -140,15 +157,31 @@ struct hwseq_private_funcs {
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*update_odm_sequence)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context);
+ void (*program_all_writeback_pipes_in_tree_sequence)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*s0i3_golden_init_wa)(struct dc *dc);
void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*verify_allow_pstate_change_high_sequence)(struct dc *dc,
+ struct block_sequence_state *seq_state);
void (*program_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*program_pipe_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
void (*dccg_init)(struct dce_hwseq *hws);
bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
@@ -163,6 +196,8 @@ struct hwseq_private_funcs {
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
+ void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
@@ -186,6 +221,7 @@ struct hwseq_private_funcs {
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*dc_ip_request_cntl)(struct dc *dc, bool enable);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d11893f8c916..5ed2cd344804 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -58,8 +58,8 @@
#include "transform.h"
#include "dpp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
struct resource_pool;
struct dc_state;
@@ -274,7 +274,7 @@ struct resource_pool {
/* An array for accessing the link encoder objects that have been created.
* Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA
*/
- struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS];
+ struct link_encoder *link_encoders[MAX_LINK_ENCODERS];
/* Number of DIG link encoder objects created - i.e. number of valid
* entries in link_encoders array.
*/
@@ -514,7 +514,7 @@ struct pipe_ctx {
struct link_enc_cfg_context {
enum link_enc_cfg_mode mode;
struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ enum engine_id link_enc_avail[MAX_LINK_ENCODERS];
struct link_enc_assignment transient_assignments[MAX_PIPES];
};
@@ -526,8 +526,8 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
- unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS];
- int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS];
+ unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 45645f9fd86c..7ce2f417f86a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp {
} size;
union reg_cursor_settings_cfg {
struct {
- uint32_t dst_y_offset: 8;
- uint32_t chunk_hdl_adjust: 2;
- uint32_t reserved: 22;
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
} bits;
uint32_t raw;
} settings;
@@ -83,12 +83,34 @@ union reg_cur0_control_cfg {
} bits;
uint32_t raw;
};
+
struct cursor_position_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
};
struct cursor_attribute_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
+ union reg_cur0_fp_scale_bias {
+ struct {
+ uint32_t fp_bias: 16;
+ uint32_t fp_scale: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias;
+ union reg_cur0_fp_scale_bias_g_y {
+ struct {
+ uint32_t fp_bias_g_y: 16;
+ uint32_t fp_scale_g_y: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_g_y;
+ union reg_cur0_fp_scale_bias_rb_crcb {
+ struct {
+ uint32_t fp_bias_rb_crcb: 16;
+ uint32_t fp_scale_rb_crcb: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_rb_crcb;
};
struct cursor_attributes_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 61c4d2a7db1c..500a601e99b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -71,6 +71,125 @@ enum pixel_rate_div {
PIXEL_RATE_DIV_NA = 0xF
};
+struct dcn_dccg_reg_state {
+ uint32_t dc_mem_global_pwr_req_cntl;
+ uint32_t dccg_audio_dtbclk_dto_modulo;
+ uint32_t dccg_audio_dtbclk_dto_phase;
+ uint32_t dccg_audio_dto_source;
+ uint32_t dccg_audio_dto0_module;
+ uint32_t dccg_audio_dto0_phase;
+ uint32_t dccg_audio_dto1_module;
+ uint32_t dccg_audio_dto1_phase;
+ uint32_t dccg_cac_status;
+ uint32_t dccg_cac_status2;
+ uint32_t dccg_disp_cntl_reg;
+ uint32_t dccg_ds_cntl;
+ uint32_t dccg_ds_dto_incr;
+ uint32_t dccg_ds_dto_modulo;
+ uint32_t dccg_ds_hw_cal_interval;
+ uint32_t dccg_gate_disable_cntl;
+ uint32_t dccg_gate_disable_cntl2;
+ uint32_t dccg_gate_disable_cntl3;
+ uint32_t dccg_gate_disable_cntl4;
+ uint32_t dccg_gate_disable_cntl5;
+ uint32_t dccg_gate_disable_cntl6;
+ uint32_t dccg_global_fgcg_rep_cntl;
+ uint32_t dccg_gtc_cntl;
+ uint32_t dccg_gtc_current;
+ uint32_t dccg_gtc_dto_incr;
+ uint32_t dccg_gtc_dto_modulo;
+ uint32_t dccg_perfmon_cntl;
+ uint32_t dccg_perfmon_cntl2;
+ uint32_t dccg_soft_reset;
+ uint32_t dccg_test_clk_sel;
+ uint32_t dccg_vsync_cnt_ctrl;
+ uint32_t dccg_vsync_cnt_int_ctrl;
+ uint32_t dccg_vsync_otg0_latch_value;
+ uint32_t dccg_vsync_otg1_latch_value;
+ uint32_t dccg_vsync_otg2_latch_value;
+ uint32_t dccg_vsync_otg3_latch_value;
+ uint32_t dccg_vsync_otg4_latch_value;
+ uint32_t dccg_vsync_otg5_latch_value;
+ uint32_t dispclk_cgtt_blk_ctrl_reg;
+ uint32_t dispclk_freq_change_cntl;
+ uint32_t dp_dto_dbuf_en;
+ uint32_t dp_dto0_modulo;
+ uint32_t dp_dto0_phase;
+ uint32_t dp_dto1_modulo;
+ uint32_t dp_dto1_phase;
+ uint32_t dp_dto2_modulo;
+ uint32_t dp_dto2_phase;
+ uint32_t dp_dto3_modulo;
+ uint32_t dp_dto3_phase;
+ uint32_t dpiaclk_540m_dto_modulo;
+ uint32_t dpiaclk_540m_dto_phase;
+ uint32_t dpiaclk_810m_dto_modulo;
+ uint32_t dpiaclk_810m_dto_phase;
+ uint32_t dpiaclk_dto_cntl;
+ uint32_t dpiasymclk_cntl;
+ uint32_t dppclk_cgtt_blk_ctrl_reg;
+ uint32_t dppclk_ctrl;
+ uint32_t dppclk_dto_ctrl;
+ uint32_t dppclk0_dto_param;
+ uint32_t dppclk1_dto_param;
+ uint32_t dppclk2_dto_param;
+ uint32_t dppclk3_dto_param;
+ uint32_t dprefclk_cgtt_blk_ctrl_reg;
+ uint32_t dprefclk_cntl;
+ uint32_t dpstreamclk_cntl;
+ uint32_t dscclk_dto_ctrl;
+ uint32_t dscclk0_dto_param;
+ uint32_t dscclk1_dto_param;
+ uint32_t dscclk2_dto_param;
+ uint32_t dscclk3_dto_param;
+ uint32_t dtbclk_dto_dbuf_en;
+ uint32_t dtbclk_dto0_modulo;
+ uint32_t dtbclk_dto0_phase;
+ uint32_t dtbclk_dto1_modulo;
+ uint32_t dtbclk_dto1_phase;
+ uint32_t dtbclk_dto2_modulo;
+ uint32_t dtbclk_dto2_phase;
+ uint32_t dtbclk_dto3_modulo;
+ uint32_t dtbclk_dto3_phase;
+ uint32_t dtbclk_p_cntl;
+ uint32_t force_symclk_disable;
+ uint32_t hdmicharclk0_clock_cntl;
+ uint32_t hdmistreamclk_cntl;
+ uint32_t hdmistreamclk0_dto_param;
+ uint32_t microsecond_time_base_div;
+ uint32_t millisecond_time_base_div;
+ uint32_t otg_pixel_rate_div;
+ uint32_t otg0_phypll_pixel_rate_cntl;
+ uint32_t otg0_pixel_rate_cntl;
+ uint32_t otg1_phypll_pixel_rate_cntl;
+ uint32_t otg1_pixel_rate_cntl;
+ uint32_t otg2_phypll_pixel_rate_cntl;
+ uint32_t otg2_pixel_rate_cntl;
+ uint32_t otg3_phypll_pixel_rate_cntl;
+ uint32_t otg3_pixel_rate_cntl;
+ uint32_t phyasymclk_clock_cntl;
+ uint32_t phybsymclk_clock_cntl;
+ uint32_t phycsymclk_clock_cntl;
+ uint32_t phydsymclk_clock_cntl;
+ uint32_t phyesymclk_clock_cntl;
+ uint32_t phyplla_pixclk_resync_cntl;
+ uint32_t phypllb_pixclk_resync_cntl;
+ uint32_t phypllc_pixclk_resync_cntl;
+ uint32_t phyplld_pixclk_resync_cntl;
+ uint32_t phyplle_pixclk_resync_cntl;
+ uint32_t refclk_cgtt_blk_ctrl_reg;
+ uint32_t socclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_psp_cntl;
+ uint32_t symclk32_le_cntl;
+ uint32_t symclk32_se_cntl;
+ uint32_t symclka_clock_enable;
+ uint32_t symclkb_clock_enable;
+ uint32_t symclkc_clock_enable;
+ uint32_t symclkd_clock_enable;
+ uint32_t symclke_clock_enable;
+};
+
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
@@ -81,7 +200,6 @@ struct dccg {
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
};
-
struct dtbclk_dto_params {
const struct dc_crtc_timing *timing;
int otg_inst;
@@ -214,6 +332,7 @@ struct dccg_funcs {
void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
+ void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 843a18287c83..dafc8490efb5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,14 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct dcn_hubbub_reg_state {
+ uint32_t det0_ctrl;
+ uint32_t det1_ctrl;
+ uint32_t det2_ctrl;
+ uint32_t det3_ctrl;
+ uint32_t compbuf_ctrl;
+};
+
struct hubbub_system_latencies {
uint32_t max_latency_ns;
uint32_t avg_latency_ns;
@@ -216,6 +224,8 @@ struct hubbub_funcs {
void (*init_watermarks)(struct hubbub *hubbub);
+ void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
/**
* @program_det_size:
*
@@ -242,17 +252,37 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
- void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes);
- uint32_t (*compbuf_config_error)(struct hubbub *hubbub);
- struct hubbub_perfmon_funcs{
- void (*start_system_latency_measurement)(struct hubbub *hubbub);
- void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies);
- void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub);
- void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps);
- void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params);
- void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns);
+ struct hubbub_perfmon_funcs {
void (*reset)(struct hubbub *hubbub);
+ void (*start_measuring_max_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_average_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_urgent_ramp_latency_ns)(
+ struct hubbub *hubbub,
+ const struct hubbub_urgent_latency_params *params);
+ uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz);
+ void (*start_measuring_unbounded_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *duration_ns);
+ void (*start_measuring_average_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t min_duration_ns,
+ uint32_t *duration_ns);
} perfmon;
+
+ struct hubbub_qos_funcs {
+ void (*force_display_nominal_profile)(struct hubbub *hubbub);
+ void (*force_display_urgent_profile)(struct hubbub *hubbub);
+ void (*reset_display_qos_profile)(struct hubbub *hubbub);
+ } qos;
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 1b7c085dc2cc..d88b57d4f512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -65,7 +65,6 @@ union defer_reg_writes {
} bits;
uint32_t raw;
};
-
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -84,6 +83,7 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+ bool cursor_offload;
struct cursor_position_cache_dpp pos;
struct cursor_attribute_cache_dpp att;
@@ -202,6 +202,19 @@ struct dcn_dpp_state {
uint32_t gamcor_mode;
};
+struct dcn_dpp_reg_state {
+ uint32_t recout_start;
+ uint32_t recout_size;
+ uint32_t scl_horz_filter_scale_ratio;
+ uint32_t scl_vert_filter_scale_ratio;
+ uint32_t scl_mode;
+ uint32_t cm_control;
+ uint32_t dpp_control;
+ uint32_t dscl_control;
+ uint32_t obuf_control;
+ uint32_t mpc_size;
+};
+
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
@@ -225,6 +238,8 @@ struct dpp_funcs {
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+ void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 2b874d2cc61c..a79019365af8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -41,8 +41,8 @@
#include "mem_input.h"
#include "cursor_reg_cache.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -126,11 +126,13 @@ struct hubp {
int mpcc_id;
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
+ bool cursor_offload;
bool power_gated;
struct cursor_position_cache_hubp pos;
struct cursor_attribute_cache_hubp att;
struct cursor_rect cur_rect;
+ bool use_mall_for_cursor;
};
struct surface_flip_registers {
@@ -236,6 +238,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..5e2813e9ae2f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,7 +44,36 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
+/**
+ * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders
+ *
+ * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7,
+ * although not every GPU has that many.
+ */
+#define MAX_DIG_LINK_ENCODERS 7
+
+/**
+ * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
+ *
+ * Analog encoders are ENGINE_ID_DACA/B; there are at most 2,
+ * although not every GPU has that many. Modern GPUs typically
+ * don't have analog encoders.
+ */
+#define MAX_DAC_LINK_ENCODERS 2
+
+/**
+ * define MAX_LINK_ENCODERS - maximum number of link encoders in total
+ *
+ * This includes both analog and digital encoders.
+ */
+#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
+
-#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 08c16ba52a51..df512920a9fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -47,6 +47,7 @@ struct encoder_init_data {
enum hpd_source_id hpd_source;
/* TODO: in DAL2, here was pointer to EventManagerInterface */
struct graphics_object_id encoder;
+ enum engine_id analog_engine;
struct dc_context *ctx;
enum transmitter transmitter;
};
@@ -83,6 +84,7 @@ struct link_encoder {
struct graphics_object_id connector;
uint32_t output_signals;
enum engine_id preferred_engine;
+ enum engine_id analog_engine;
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 42fbc70f7056..d468bc85566a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -29,7 +29,7 @@
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 22960ee03dee..a8d1abe20f62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -350,6 +350,15 @@ struct mpcc_state {
struct mpc_rmcm_regs rmcm_regs;
};
+struct dcn_mpc_reg_state {
+ uint32_t mpcc_bot_sel;
+ uint32_t mpcc_control;
+ uint32_t mpcc_status;
+ uint32_t mpcc_top_sel;
+ uint32_t mpcc_opp_id;
+ uint32_t mpcc_ogam_control;
+};
+
/**
* struct mpc_funcs - funcs
*/
@@ -373,6 +382,25 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
+ /**
+ * @mpc_read_reg_state:
+ *
+ * Read MPC register state for debugging underflow purposes.
+ *
+ * Parameters:
+ *
+ * - [in] mpc - MPC context
+ * - [in] mpcc_inst - MPCC instance to read
+ * - [out] mpc_reg_state - MPC register state structure
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*mpc_read_reg_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
/**
* @insert_plane:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 747679cb4944..e1428a83ecbc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,16 @@ struct oppbuf_params {
uint32_t num_segment_padded_pixels;
};
+struct dcn_opp_reg_state {
+ uint32_t dpg_control;
+ uint32_t fmt_control;
+ uint32_t oppbuf_control;
+ uint32_t opp_pipe_control;
+ uint32_t opp_pipe_crc_control;
+ uint32_t opp_abm_control;
+ uint32_t dscrm_dsc_forward_config;
+};
+
struct opp_funcs {
@@ -368,6 +378,9 @@ struct opp_funcs {
struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding,
bool is_primary);
+
+ void (*opp_read_reg_state)(
+ struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f2de2cf23859..da7bf59c4b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -175,6 +175,135 @@ struct dcn_otg_state {
uint32_t otg_double_buffer_control;
};
+struct dcn_optc_reg_state {
+ uint32_t optc_bytes_per_pixel;
+ uint32_t optc_data_format_control;
+ uint32_t optc_data_source_select;
+ uint32_t optc_input_clock_control;
+ uint32_t optc_input_global_control;
+ uint32_t optc_input_spare_register;
+ uint32_t optc_memory_config;
+ uint32_t optc_rsmu_underflow;
+ uint32_t optc_underflow_threshold;
+ uint32_t optc_width_control;
+
+ uint32_t otg_3d_structure_control;
+ uint32_t otg_clock_control;
+ uint32_t otg_control;
+ uint32_t otg_count_control;
+ uint32_t otg_count_reset;
+ uint32_t otg_crc_cntl;
+ uint32_t otg_crc_sig_blue_control_mask;
+ uint32_t otg_crc_sig_red_green_mask;
+ uint32_t otg_crc0_data_b;
+ uint32_t otg_crc0_data_rg;
+ uint32_t otg_crc0_windowa_x_control;
+ uint32_t otg_crc0_windowa_x_control_readback;
+ uint32_t otg_crc0_windowa_y_control;
+ uint32_t otg_crc0_windowa_y_control_readback;
+ uint32_t otg_crc0_windowb_x_control;
+ uint32_t otg_crc0_windowb_x_control_readback;
+ uint32_t otg_crc0_windowb_y_control;
+ uint32_t otg_crc0_windowb_y_control_readback;
+ uint32_t otg_crc1_data_b;
+ uint32_t otg_crc1_data_rg;
+ uint32_t otg_crc1_windowa_x_control;
+ uint32_t otg_crc1_windowa_x_control_readback;
+ uint32_t otg_crc1_windowa_y_control;
+ uint32_t otg_crc1_windowa_y_control_readback;
+ uint32_t otg_crc1_windowb_x_control;
+ uint32_t otg_crc1_windowb_x_control_readback;
+ uint32_t otg_crc1_windowb_y_control;
+ uint32_t otg_crc1_windowb_y_control_readback;
+ uint32_t otg_crc2_data_b;
+ uint32_t otg_crc2_data_rg;
+ uint32_t otg_crc3_data_b;
+ uint32_t otg_crc3_data_rg;
+ uint32_t otg_dlpc_control;
+ uint32_t otg_double_buffer_control;
+ uint32_t otg_drr_control2;
+ uint32_t otg_drr_control;
+ uint32_t otg_drr_timing_int_status;
+ uint32_t otg_drr_trigger_window;
+ uint32_t otg_drr_v_total_change;
+ uint32_t otg_drr_v_total_reach_range;
+ uint32_t otg_dsc_start_position;
+ uint32_t otg_force_count_now_cntl;
+ uint32_t otg_global_control0;
+ uint32_t otg_global_control1;
+ uint32_t otg_global_control2;
+ uint32_t otg_global_control3;
+ uint32_t otg_global_control4;
+ uint32_t otg_global_sync_status;
+ uint32_t otg_gsl_control;
+ uint32_t otg_gsl_vsync_gap;
+ uint32_t otg_gsl_window_x;
+ uint32_t otg_gsl_window_y;
+ uint32_t otg_h_blank_start_end;
+ uint32_t otg_h_sync_a;
+ uint32_t otg_h_sync_a_cntl;
+ uint32_t otg_h_timing_cntl;
+ uint32_t otg_h_total;
+ uint32_t otg_interlace_control;
+ uint32_t otg_interlace_status;
+ uint32_t otg_interrupt_control;
+ uint32_t otg_long_vblank_status;
+ uint32_t otg_m_const_dto0;
+ uint32_t otg_m_const_dto1;
+ uint32_t otg_manual_force_vsync_next_line;
+ uint32_t otg_master_en;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_master_update_mode;
+ uint32_t otg_nom_vert_position;
+ uint32_t otg_pipe_update_status;
+ uint32_t otg_pixel_data_readback0;
+ uint32_t otg_pixel_data_readback1;
+ uint32_t otg_request_control;
+ uint32_t otg_snapshot_control;
+ uint32_t otg_snapshot_frame;
+ uint32_t otg_snapshot_position;
+ uint32_t otg_snapshot_status;
+ uint32_t otg_spare_register;
+ uint32_t otg_static_screen_control;
+ uint32_t otg_status;
+ uint32_t otg_status_frame_count;
+ uint32_t otg_status_hv_count;
+ uint32_t otg_status_position;
+ uint32_t otg_status_vf_count;
+ uint32_t otg_stereo_control;
+ uint32_t otg_stereo_force_next_eye;
+ uint32_t otg_stereo_status;
+ uint32_t otg_trig_manual_control;
+ uint32_t otg_triga_cntl;
+ uint32_t otg_triga_manual_trig;
+ uint32_t otg_trigb_cntl;
+ uint32_t otg_trigb_manual_trig;
+ uint32_t otg_update_lock;
+ uint32_t otg_v_blank_start_end;
+ uint32_t otg_v_count_stop_control;
+ uint32_t otg_v_count_stop_control2;
+ uint32_t otg_v_sync_a;
+ uint32_t otg_v_sync_a_cntl;
+ uint32_t otg_v_total;
+ uint32_t otg_v_total_control;
+ uint32_t otg_v_total_int_status;
+ uint32_t otg_v_total_max;
+ uint32_t otg_v_total_mid;
+ uint32_t otg_v_total_min;
+ uint32_t otg_vert_sync_control;
+ uint32_t otg_vertical_interrupt0_control;
+ uint32_t otg_vertical_interrupt0_position;
+ uint32_t otg_vertical_interrupt1_control;
+ uint32_t otg_vertical_interrupt1_position;
+ uint32_t otg_vertical_interrupt2_control;
+ uint32_t otg_vertical_interrupt2_position;
+ uint32_t otg_vready_param;
+ uint32_t otg_vstartup_param;
+ uint32_t otg_vsync_nom_int_status;
+ uint32_t otg_vupdate_keepout;
+ uint32_t otg_vupdate_param;
+};
+
/**
* struct timing_generator - Entry point to Output Timing Generator feature.
*/
@@ -381,6 +510,7 @@ struct timing_generator_funcs {
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
+ void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 4e26a16a8743..79746d931471 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -49,6 +49,7 @@ struct resource_caps {
int num_video_plane;
int num_audio;
int num_stream_encoder;
+ int num_analog_stream_encoder;
int num_pll;
int num_dwb;
int num_ddc;
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 9e33bf937a69..1045c268672e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}
@@ -876,7 +877,7 @@ bool dp_set_test_pattern(
return false;
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -924,7 +925,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 892907991f91..befa67b2b2ae 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
return;
}
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
@@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (stream_enc->funcs->enable_stream)
stream_enc->funcs->enable_stream(stream_enc,
pipe_ctx->stream->signal, false);
- link_enc->funcs->connect_dig_be_to_fe(
- link_enc,
- pipe_ctx->stream_res.stream_enc->id,
- false);
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
pipe_ctx->stream->link,
@@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- if (!dc_is_virtual_signal(stream->signal))
+ if (!dc_is_virtual_signal(stream->signal) &&
+ !dc_is_rgb_signal(stream->signal))
stream_encoder->funcs->setup_stereo_sync(
stream_encoder,
pipe_ctx->stream_res.tg->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 85303167a553..c417780f37bc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service,
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+ if (ddc_service->link->local_sink &&
+ !ddc_service->link->local_sink->edid_caps.scdc_present)
+ return;
+
link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
sizeof(sink->scdc_caps.manufacturer_OUI.byte));
@@ -858,6 +862,82 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
verify_link_capability_non_destructive(link);
}
+/**
+ * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable.
+ *
+ * Evaluates an 8-byte EDID header to check if it's good enough
+ * to determine whether a display is connected without reading
+ * the full EDID.
+ */
+static bool link_detect_evaluate_edid_header(uint8_t edid_header[8])
+{
+ int edid_header_score = 0;
+ int i;
+
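+ /* A valid EDID header is 00 FF FF FF FF FF FF 00; accept it if at
+ * least 6 of the 8 bytes match that pattern. */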
+ for (i = 0; i < 8; ++i)
+ edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
+
+ return edid_header_score >= 6;
+}
+
+/**
+ * link_detect_ddc_probe() - Probe the DDC to see if a display is connected.
+ *
+ * Detect whether a display is connected via DDC without reading the full
+ * EDID. Reads only the EDID header (the first 8 bytes of EDID) from DDC
+ * and evaluates whether it looks like a valid EDID header.
+ */
+static bool link_detect_ddc_probe(struct dc_link *link)
+{
+ if (!link->ddc)
+ return false;
+
+ uint8_t edid_header[8] = {0};
+ bool ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header));
+
+ if (!ddc_probed)
+ return false;
+
+ if (!link_detect_evaluate_edid_header(edid_header))
+ return false;
+
+ return true;
+}
+
+/**
+ * link_detect_dac_load_detect() - Performs DAC load detection.
+ *
+ * Load detection can be used to detect the presence of an
+ * analog display when we can't read DDC. This causes a visible
+ * glitch, so it should be used sparingly.
+ */
+static bool link_detect_dac_load_detect(struct dc_link *link)
+{
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct link_encoder *link_enc = link->link_enc;
+ enum engine_id engine_id = link_enc->preferred_engine;
+ enum dal_device_type device_type = DEVICE_TYPE_CRT;
+ enum bp_result bp_result;
+ uint32_t enum_id;
+
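+ /* Map the DAC engine to the BIOS device enum_id (DACA -> 1, DACB -> 2), defaulting to DACA. */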
+ switch (engine_id) {
+ case ENGINE_ID_DACB:
+ enum_id = 2;
+ break;
+ case ENGINE_ID_DACA:
+ default:
+ engine_id = ENGINE_ID_DACA;
+ enum_id = 1;
+ break;
+ }
+
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ return bp_result == BP_RESULT_OK;
+}
+
/*
* detect_link_and_local_sink() - Detect if a sink is attached to a given link
*
@@ -942,6 +1019,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ case SIGNAL_TYPE_RGB: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_RGB;
+ break;
+ }
+
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
@@ -1066,7 +1149,30 @@ static bool detect_link_and_local_sink(struct dc_link *link,
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
+ /* Analog connectors without EDID:
+ * - old monitor that actually doesn't have EDID
+ * - cheap DVI-A cable or adapter that doesn't connect DDC
+ */
+ if (dc_connector_supports_analog(link->link_id.id)) {
+ /* If we didn't do DAC load detection yet, do it now
+ * to verify there really is a display connected.
+ */
+ if (link->type != dc_connection_dac_load &&
+ !link_detect_dac_load_detect(link)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+ return false;
+ }
+
+ DC_LOG_INFO("%s detected analog display without EDID\n", __func__);
+ link->type = dc_connection_dac_load;
+ sink->edid_caps.analog = true;
+ break;
+ }
+
DC_LOG_ERROR("No EDID read.\n");
+
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -1103,6 +1209,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ sink->edid_caps.analog &= dc_connector_supports_analog(link->link_id.id);
+
// Check if edid is the same
if ((prev_sink) &&
(edid_status == EDID_THE_SAME || edid_status == EDID_OK))
@@ -1133,9 +1241,17 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
- query_hdcp_capability(sink->sink_signal, link);
+
+ if (!sink->edid_caps.analog)
+ query_hdcp_capability(sink->sink_signal, link);
}
+ /* DVI-I connector connected to analog display. */
+ if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ sink->edid_caps.analog)
+ sink->sink_signal = SIGNAL_TYPE_RGB;
+
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
@@ -1232,6 +1348,29 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
+/**
+ * link_detect_analog() - Determines if an analog sink is connected.
+ */
+static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type)
+{
+ /* Should only be called for connectors that support an analog signal. */
+ ASSERT(dc_connector_supports_analog(link->link_id.id));
+
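+ /* Prefer a non-intrusive DDC probe; fall back to DAC load detection. */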
+ if (link_detect_ddc_probe(link)) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link_detect_dac_load_detect(link)) {
+ *type = dc_connection_dac_load;
+ return true;
+ }
+
+ *type = dc_connection_none;
+ return true;
+}
+
/*
* link_detect_connection_type() - Determine if there is a sink connected
*
@@ -1248,6 +1386,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
return true;
}
+ /* Ignore the HPD pin (if any) for analog connectors.
+ * Instead, rely on DDC probing and DAC load detection.
+ *
+ * - VGA connectors don't have any HPD at all.
+ * - Some DVI-A cables don't connect the HPD pin.
+ * - Some DVI-A cables pull up the HPD pin.
+ * (So it's high even when no display is connected.)
+ */
+ if (dc_connector_supports_analog(link->link_id.id))
+ return link_detect_analog(link, type);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
if (!link->dc->config.edp_no_power_sequencing)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 83419e1a9036..4ddcdc222913 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
@@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
@@ -2256,6 +2258,9 @@ static enum dc_status enable_link(
enable_link_lvds(pipe_ctx);
status = DC_OK;
break;
+ case SIGNAL_TYPE_RGB:
+ status = DC_OK;
+ break;
case SIGNAL_TYPE_VIRTUAL:
status = enable_link_virtual(pipe_ctx);
break;
@@ -2369,7 +2374,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
set_avmute(pipe_ctx, true);
}
- dc->hwss.disable_audio_stream(pipe_ctx);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dc->hwss.disable_audio_stream(pipe_ctx);
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
@@ -2654,7 +2660,8 @@ void link_set_dpms_on(
enable_stream_features(pipe_ctx);
update_psp_stream_config(pipe_ctx, false);
- dc->hwss.enable_audio_stream(pipe_ctx);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dc->hwss.enable_audio_stream(pipe_ctx);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
set_avmute(pipe_ctx, false);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 31a73867cd4c..7989baf3843c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -451,6 +451,47 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
+static enum engine_id find_analog_engine(struct dc_link *link)
+{
+ struct dc_bios *bp = link->ctx->dc_bios;
+ struct graphics_object_id encoder = {0};
+ enum bp_result bp_result = BP_RESULT_OK;
+ int i;
+
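+ /* A connector can reference several source encoders; scan them for a DAC. */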
+ for (i = 0; i < 3; i++) {
+ bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder);
+
+ if (bp_result != BP_RESULT_OK)
+ return ENGINE_ID_UNKNOWN;
+
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENGINE_ID_DACA;
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENGINE_ID_DACB;
+ }
+ }
+
+ return ENGINE_ID_UNKNOWN;
+}
+
+static bool transmitter_supported(const enum transmitter transmitter)
+{
+ return transmitter != TRANSMITTER_UNKNOWN &&
+ transmitter != TRANSMITTER_NUTMEG_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_LCD;
+}
+
+static bool analog_engine_supported(const enum engine_id engine_id)
+{
+ return engine_id == ENGINE_ID_DACA ||
+ engine_id == ENGINE_ID_DACB;
+}
+
static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -482,6 +522,19 @@ static bool construct_phy(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ /* Determine early if the link has any supported encoders,
+ * so that we avoid initializing DDC and HPD, etc.
+ */
+ bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
+ enc_init_data.analog_engine = find_analog_engine(link);
+
+ if (!transmitter_supported(enc_init_data.transmitter) &&
+ !analog_engine_supported(enc_init_data.analog_engine)) {
+ DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
+ return false;
+ }
+
link->ep_type = DISPLAY_ENDPOINT_PHY;
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
@@ -530,6 +583,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_DUAL_LINK_DVII:
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
+ case CONNECTOR_ID_VGA:
+ link->connector_signal = SIGNAL_TYPE_RGB;
+ break;
case CONNECTOR_ID_DISPLAY_PORT:
case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
@@ -611,16 +667,12 @@ static bool construct_phy(struct dc_link *link,
dal_ddc_get_line(get_ddc_pin(link->ddc));
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
link->hpd_src = enc_init_data.hpd_source;
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
@@ -817,9 +869,6 @@ static bool construct_dpia(struct dc_link *link,
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
-
return true;
ddc_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 267180e7bc48..5d2bcce2f669 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service,
/*Lower than 340 Scramble bit from SCDC caps*/
if (ddc_service->link->local_sink &&
- ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite ||
+ !ddc_service->link->local_sink->edid_caps.scdc_present))
return;
link_query_ddc_data(ddc_service, slave_address, &offset,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index b12c11bd6a14..750147c52c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
- if (link->fec_state == dc_link_fec_enabled)
+ if (link->dc->debug.disable_fec)
+ force_disable = true;
+ else if (link->fec_state == dc_link_fec_enabled)
force_disable = false;
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
@@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw(
return link_rate;
}
+static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link)
+{
+ enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN;
+
+ if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX)
+ lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count > 0)
+ lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+
+ return lttpr_max_lane_count;
+}
+
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
@@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
break;
}
+ /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_link_rate > 0)
+ lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
@@ -1845,6 +1867,12 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
+ /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. */
+ if (link->dpcd_caps.is_mst_capable)
+ link->wa_flags.dp_mot_reset_segment = true;
+ else
+ link->wa_flags.dp_mot_reset_segment = false;
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2241,6 +2269,7 @@ const struct dc_link_settings *dp_get_verified_link_cap(
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+ enum dc_lane_count lttpr_max_lane_count;
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
@@ -2305,8 +2334,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
/* Some LTTPR devices do not report valid DPCD revisions, if so, do not take it's link cap into consideration. */
if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_lane_count = get_lttpr_max_lane_count(link);
+
+ if (lttpr_max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = lttpr_max_lane_count;
+
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
if (lttpr_max_link_rate < max_link_cap.link_rate)
@@ -2412,6 +2444,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_detect_lt_init(link);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d",
+ __func__, link->link_index,
+ known_limit_link_setting->link_rate,
+ known_limit_link_setting->lane_count);
+
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa)
apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
@@ -2448,6 +2485,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_lt_fail_count_update(link, fail_count, true);
dp_trace_set_lt_end_timestamp(link, true);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. is_success=%d fail_count=%d",
+ __func__, link->link_index,
+ success,
+ fail_count);
+
return success;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 8a3c18ae97a7..c958d3f600c8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -225,11 +225,6 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index);
- return false;
- }
-
val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
@@ -273,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
- } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ }
+
+ if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
- } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ }
+
+ if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+
+ DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity);
+ }
+
+ if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 693477413347..4b01ab0a5a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
* Don't handle RX IRQ unless one of following is met:
* 1) The link is established (cur_link_settings != unknown)
* 2) We know we're dealing with a branch device, SST or MST
+ * 3) BW allocation mode is enabled on the link.
*/
if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
+ is_dp_branch_device(link) ||
+ link->dpia_bw_alloc_config.bw_alloc_enabled)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 85298b8a1b5e..6bfd2c1294e5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state(
MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
}
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst, struct dcn_mpc_reg_state *mpc_reg_state)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]);
+ mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]);
+ mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]);
+ mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]);
+
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index 103f29900a2c..e2f147d17178 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
+
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
enum dc_lut_mode mpc3_get_ogam_current(
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index 6f0e017a8ae2..83bbbf34bcac 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index e1a0308dee57..eeac13fdd6f5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -598,6 +598,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..45d418636d0c 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
@@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
+
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
.dpg_is_pending = NULL,
- .opp_destroy = opp1_destroy
+ .opp_destroy = opp1_destroy,
+ .opp_read_reg_state = opp1_read_reg_state
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..38d0d530a9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
@@ -63,7 +63,8 @@
uint32_t OPPBUF_CONTROL1; \
uint32_t OPPBUF_3D_PARAMETERS_0; \
uint32_t OPPBUF_3D_PARAMETERS_1; \
- uint32_t OPP_PIPE_CONTROL
+ uint32_t OPP_PIPE_CONTROL; \
+ uint32_t OPP_PIPE_CRC_CONTROL
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -153,7 +154,6 @@ struct dcn10_opp {
const struct dcn10_opp_registers *regs;
const struct dcn10_opp_shift *opp_shift;
const struct dcn10_opp_mask *opp_mask;
-
bool is_write_to_ram_a_safe;
};
@@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
void opp1_destroy(struct output_pixel_processor **opp);
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..ce826a5be4c7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
@@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp
return 0;
}
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
.opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count,
+ .opp_read_reg_state = opp2_read_reg_state
};
void dcn20_opp_construct(struct dcn20_opp *oppn20,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..fb0c047c1788 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
@@ -59,7 +59,8 @@
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
uint32_t DPG_RAMP_CONTROL; \
- uint32_t DPG_STATUS
+ uint32_t DPG_STATUS; \
+ uint32_t DSCRM_DSC_FORWARD_CONFIG
#define OPP_DPG_MASK_SH_LIST(mask_sh) \
OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \
@@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel (
uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding, bool is_primary);
+
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
index 3542b51c9aac..e11c4e16402f 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
@@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable)
{
REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable);
}
+
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
index a9a413527801..c6cace90e8f2 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
@@ -31,7 +31,8 @@
#define OPP_REG_VARIABLE_LIST_DCN3_5 \
OPP_REG_VARIABLE_LIST_DCN2_0; \
- uint32_t OPP_TOP_CLK_CONTROL
+ uint32_t OPP_TOP_CLK_CONTROL; \
+ uint32_t OPP_ABM_CONTROL
#define OPP_MASK_SH_LIST_DCN35(mask_sh) \
OPP_MASK_SH_LIST_DCN20(mask_sh), \
@@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20,
void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable);
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 8b2a8455eb56..803bcc25601c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -209,7 +209,43 @@
uint32_t OPTC_WIDTH_CONTROL2; \
uint32_t OTG_PSTATE_REGISTER; \
uint32_t OTG_PIPE_UPDATE_STATUS; \
- uint32_t INTERRUPT_DEST
+ uint32_t INTERRUPT_DEST; \
+ uint32_t OPTC_INPUT_SPARE_REGISTER; \
+ uint32_t OPTC_RSMU_UNDERFLOW; \
+ uint32_t OPTC_UNDERFLOW_THRESHOLD; \
+ uint32_t OTG_COUNT_CONTROL; \
+ uint32_t OTG_COUNT_RESET; \
+ uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
+ uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
+ uint32_t OTG_DLPC_CONTROL; \
+ uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_TIMING_INT_STATUS; \
+ uint32_t OTG_GLOBAL_CONTROL3; \
+ uint32_t OTG_GLOBAL_SYNC_STATUS; \
+ uint32_t OTG_GSL_VSYNC_GAP; \
+ uint32_t OTG_INTERLACE_STATUS; \
+ uint32_t OTG_INTERRUPT_CONTROL; \
+ uint32_t OTG_LONG_VBLANK_STATUS; \
+ uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \
+ uint32_t OTG_MASTER_EN; \
+ uint32_t OTG_PIXEL_DATA_READBACK0; \
+ uint32_t OTG_PIXEL_DATA_READBACK1; \
+ uint32_t OTG_REQUEST_CONTROL; \
+ uint32_t OTG_SNAPSHOT_CONTROL; \
+ uint32_t OTG_SNAPSHOT_FRAME; \
+ uint32_t OTG_SNAPSHOT_POSITION; \
+ uint32_t OTG_SNAPSHOT_STATUS; \
+ uint32_t OTG_SPARE_REGISTER; \
+ uint32_t OTG_STATUS_HV_COUNT; \
+ uint32_t OTG_STATUS_VF_COUNT; \
+ uint32_t OTG_STEREO_FORCE_NEXT_EYE; \
+ uint32_t OTG_TRIG_MANUAL_CONTROL; \
+ uint32_t OTG_TRIGB_CNTL; \
+ uint32_t OTG_TRIGB_MANUAL_TRIG; \
+ uint32_t OTG_UPDATE_LOCK; \
+ uint32_t OTG_V_TOTAL_INT_STATUS; \
+ uint32_t OTG_VSYNC_NOM_INT_STATUS
+
struct dcn_optc_registers {
OPTC_REG_VARIABLE_LIST_DCN;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 4f1830ba619f..c6417538090f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL);
+ optc_reg_state->optc_data_format_control = REG_READ(OPTC_DATA_FORMAT_CONTROL);
+ optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT);
+ optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL);
+ optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL);
+ optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER);
+ optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG);
+ optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW);
+ optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD);
+ optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL);
+ optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL);
+ optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL);
+ optc_reg_state->otg_control = REG_READ(OTG_CONTROL);
+ optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL);
+ optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET);
+ optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL);
+ optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK);
+ optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK);
+ optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B);
+ optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG);
+ optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B);
+ optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG);
+ optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B);
+ optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG);
+ optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B);
+ optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
+ optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
+ optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
+ optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
+ optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
+ optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE);
+ optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION);
+ optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL);
+ optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0);
+ optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1);
+ optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2);
+ optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3);
+ optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4);
+ optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS);
+ optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL);
+ optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP);
+ optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X);
+ optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y);
+ optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END);
+ optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A);
+ optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL);
+ optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL);
+ optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL);
+ optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL);
+ optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS);
+ optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL);
+ optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS);
+ optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0);
+ optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1);
+ optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE);
+ optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN);
+ optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE);
+ optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION);
+ optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS);
+ optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0);
+ optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1);
+ optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL);
+ optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL);
+ optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME);
+ optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION);
+ optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS);
+ optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER);
+ optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL);
+ optc_reg_state->otg_status = REG_READ(OTG_STATUS);
+ optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT);
+ optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT);
+ optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION);
+ optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT);
+ optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL);
+ optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE);
+ optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS);
+ optc_reg_state->otg_trig_manual_control = REG_READ(OTG_TRIG_MANUAL_CONTROL);
+ optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL);
+ optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG);
+ optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL);
+ optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG);
+ optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK);
+ optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END);
+ optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL);
+ optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2);
+ optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A);
+ optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL);
+ optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL);
+ optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL);
+ optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS);
+ optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX);
+ optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID);
+ optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN);
+ optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION);
+ optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL);
+ optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION);
+ optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL);
+ optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION);
+ optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM);
+ optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM);
+ optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS);
+ optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT);
+ optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM);
+}
+
static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = {
.init_odm = optc3_init_odm,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn31_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 0f72c274f40b..98f7d2e299c5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc);
void optc31_read_otg_state(struct timing_generator *optc,
struct dcn_otg_state *s);
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 4a2caca37255..43ff957288b2 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -256,6 +256,7 @@ static const struct timing_generator_funcs dcn314_tg_funcs = {
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn314_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index b2b226bcd871..3dcb0d0c931c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = {
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 52d5ea98c86b..f699e95059f3 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -511,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 5af13706e601..a8e978d1fae8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -533,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = {
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index c4b4dc3ad8c9..d40d91ec2035 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -78,6 +78,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -225,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -368,6 +370,7 @@ static const struct dce_abm_mask abm_mask = {
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -402,8 +406,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -484,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id], &se_shift, &se_mask);
return &enc110->base;
@@ -624,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -952,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
int i;
int j = -1;
struct dc_link *link = stream->link;
+ enum engine_id preferred_engine = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(stream->signal))
+ preferred_engine = link->link_enc->analog_engine;
for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
@@ -960,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
+ if (pool->stream_enc[i]->id == preferred_engine)
return pool->stream_enc[i];
}
}
@@ -1093,6 +1120,7 @@ static bool dce100_resource_construct(
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index cccde5a6f3cd..cd54382c0af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -82,6 +82,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = true,
};
@@ -1376,6 +1380,7 @@ static bool dce110_resource_construct(
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 869a8e515fc0..3f0a6bc4dcc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -76,6 +76,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -1247,6 +1251,7 @@ static bool dce112_resource_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 540e04ec1e2d..b1570b6b1af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create(
return dce_i2c_hw;
}
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX),
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX),
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
};
@@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults = {
- .disable_clock_gate = true,
- .enable_legacy_fast_update = true,
+ .disable_clock_gate = true,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static struct clock_source *dce120_clock_source_create(
@@ -1089,6 +1093,7 @@ static bool dce120_resource_construct(
dc->caps.psp_setup_panel_mode = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index b75be6ad64f6..f0152933bee2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -80,6 +80,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -240,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(2),
link_regs(3),
link_regs(4),
- link_regs(5)
+ link_regs(5),
+ {0},
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -366,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -373,6 +377,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -382,6 +387,7 @@ static const struct resource_caps res_cap_61 = {
.num_timing_generator = 4,
.num_audio = 6,
.num_stream_encoder = 6,
+ .num_analog_stream_encoder = 1,
.num_pll = 3,
.num_ddc = 6,
};
@@ -389,6 +395,7 @@ static const struct resource_caps res_cap_61 = {
static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 2,
.num_pll = 3,
.num_ddc = 2,
@@ -599,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -718,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 5b7769745202..8687104cabb7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -78,6 +78,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -241,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -368,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -383,6 +387,7 @@ static const struct resource_caps res_cap = {
static const struct resource_caps res_cap_81 = {
.num_timing_generator = 4,
.num_audio = 7,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 7,
.num_pll = 3,
.num_ddc = 6,
@@ -391,6 +396,7 @@ static const struct resource_caps res_cap_81 = {
static const struct resource_caps res_cap_83 = {
.num_timing_generator = 2,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 2,
.num_ddc = 2,
@@ -418,8 +424,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dce_dmcu_registers dmcu_regs = {
@@ -605,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -724,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -919,6 +945,7 @@ static bool dce80_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
@@ -1320,6 +1347,7 @@ static bool dce83_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 652c05c35494..f12367adf145 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
@@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 0;
+ dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 84b38d2d6967..6679c1a14f2f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
void dcn20_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -733,7 +736,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
}
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
- struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_ATOMIC);
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL);
if (!mpc20)
return NULL;
@@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
@@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
@@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
@@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index e4a1338d21e0..055107843a70 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn201_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN201_DPP(*dpp));
@@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create(
uint32_t inst)
{
struct dcn201_dpp *dpp =
- kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
return NULL;
@@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn201_opp *opp =
- kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL);
if (!opp) {
return NULL;
@@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
{
- struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
- GFP_ATOMIC);
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL);
if (!mpc201)
return NULL;
@@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
- kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
struct dcn10_link_encoder *enc10;
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create(
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
- kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
@@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = {
static struct dce_hwseq *dcn201_hwseq_create(
struct dc_context *ctx)
{
- struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
@@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create(
uint32_t inst)
{
struct dcn201_hubp *hubp201 =
- kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL);
if (!hubp201)
return NULL;
@@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
/*a0 only, remove later*/
dc->work_arounds.no_connect_phy_config = true;
@@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool(
struct dc *dc)
{
struct dcn201_resource_pool *pool =
- kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 918742a42ded..2060acd5ae09 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index ff63f59ff928..d0ebb733e802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct(
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 82a205a7c25c..3ad6a3d4858e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = false,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 61623cb518d9..c0d4a1dc94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1290,6 +1293,7 @@ static bool dcn302_resource_construct(
&is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 02b9a84f2db3..75e09c2c283e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1234,6 +1237,7 @@ static bool dcn303_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3ed7f50554e2..0d667b54ccf8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index d4917a35b991..3ccde75a4ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -924,12 +924,15 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
.disable_dsc_power_gate = true,
.min_disp_clk_khz = 100000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1910,6 +1913,7 @@ static bool dcn314_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 82cc78c291d8..4e962f522f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
.using_dml2 = false,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 636110e48d01..5a95dd54cb42 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 3965a7f1b64b..81e64e17d0cb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -92,7 +92,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -738,10 +738,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dp_plus_plus_wa = true,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_stutter_for_wm_program = true
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn32_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -2294,6 +2297,7 @@ static bool dcn32_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index ad214986f7ac..3466ca34c93f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_subvp_high_refresh = false,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_dc_mode_overwrite = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn321_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index fff57f23f4f7..ef69898d2cc5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -33,7 +33,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn35_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1946,6 +1949,7 @@ static bool dcn35_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0abd163b425e..f3c614c4490c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -83,7 +83,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1917,6 +1920,7 @@ static bool dcn351_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index ca125ee6c2fb..6469d5fe2e6d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -11,7 +11,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn36_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1918,6 +1921,7 @@ static bool dcn36_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 1d18807e4749..130058d7a70c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -73,7 +73,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -721,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
- .enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
.fams_version = {
.minor = 1,
@@ -737,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_cositing = CHROMA_COSITING_NONE + 1,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn401_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1995,6 +1998,7 @@ static bool dcn401_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
index 21d842857601..88c11b6be004 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -9,7 +9,7 @@
#include "dc.h"
#include "clk_mgr.h"
#include "soc_and_ip_translator.h"
-#include "dml2/dml21/inc/dml_top_soc_parameter_types.h"
+#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h"
void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index b1fb0f8a253a..7a839984dbc0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps_c = 6;
spl_scratch->scl_data.taps.v_taps_c = 6;
}
+
+ /* Override mode: keep EASF enabled but use input taps if valid */
+ if (spl_in->override_easf) {
+ spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps;
+ spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps;
+ spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c;
+ spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c;
+
+ if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6))
+ skip_easf = true;
+ if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2))
+ spl_scratch->scl_data.taps.h_taps--;
+ if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2))
+ spl_scratch->scl_data.taps.h_taps_c--;
+ }
}
/*Ensure we can support the requested number of vtaps*/
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 23d254dea18f..20e4e52a77ac 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -545,6 +545,7 @@ struct spl_in {
enum linear_light_scaling lls_pref; // Linear Light Scaling
bool prefer_easf;
bool disable_easf;
+ bool override_easf; /* If true, keep EASF enabled but use provided in_taps */
struct spl_debug debug;
bool is_fullscreen;
bool is_hdr_on;
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 338fdc651f2c..9d0168986fe7 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -132,6 +132,7 @@ enum dmub_window_id {
DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_LSDMA_BUFFER,
+ DMUB_WINDOW_CURSOR_OFFLOAD,
DMUB_WINDOW_TOTAL,
};
@@ -317,6 +318,7 @@ struct dmub_srv_hw_params {
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
bool override_hbr3_pll_vco;
+ bool disable_dpia_bw_allocation;
};
/**
@@ -361,6 +363,19 @@ struct dmub_diagnostic_data {
uint8_t is_pwait : 1;
};
+/**
+ * struct dmub_preos_info - pre-OS firmware info captured before loading the post-OS firmware.
+ */
+struct dmub_preos_info {
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint64_t trace_buffer_phy_addr;
+ uint32_t trace_buffer_size;
+ uint32_t fw_version;
+ uint32_t boot_status;
+ uint32_t boot_options;
+};
+
struct dmub_srv_inbox {
/* generic status */
uint64_t num_submitted;
@@ -486,6 +501,7 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_current_time)(struct dmub_srv *dmub);
void (*get_diagnostic_data)(struct dmub_srv *dmub);
+ bool (*get_preos_fw_info)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
@@ -535,7 +551,8 @@ struct dmub_srv_create_params {
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
- * @fw_state: dmub firmware state pointer
+ * @cursor_offload_v1: Cursor offload state
+ * @fw_state: dmub firmware state pointer (for debug purposes only)
*/
struct dmub_srv {
enum dmub_asic asic;
@@ -544,7 +561,9 @@ struct dmub_srv {
bool is_virtual;
struct dmub_fb scratch_mem_fb;
struct dmub_fb ib_mem_gart;
+ struct dmub_fb cursor_offload_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
+ volatile struct dmub_cursor_offload_v1 *cursor_offload_v1;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -583,6 +602,7 @@ struct dmub_srv {
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
struct dmub_fb lsdma_rb_fb;
+ struct dmub_preos_info preos_info;
};
/**
@@ -1068,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
*/
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+/**
+ * dmub_srv_get_preos_info() - retrieves pre-OS firmware info
+ * @dmub: the dmub service
+ *
+ * Return:
+ * true - pre-OS firmware info retrieved successfully
+ * false - pre-OS firmware info could not be retrieved
+ */
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
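
dmub_srv_get_preos_info() simply dispatches to hw_funcs.get_preos_fw_info (hooked up to dmub_dcn35_get_preos_fw_info later in this patch) and, on success, leaves the results in dmub->preos_info. A hedged usage sketch, assuming a software-initialized dmub_srv and eliding error handling:

/* Illustrative only; not part of this patch. */
static void example_dump_preos_info(struct dmub_srv *dmub)
{
        if (!dmub_srv_get_preos_info(dmub))
                return; /* no pre-OS (VBIOS) DMUB firmware detected, or no hook */

        pr_info("pre-OS DMUB fw 0x%08x, boot status 0x%08x, trace buffer %u bytes @ 0x%llx\n",
                dmub->preos_info.fw_version,
                dmub->preos_info.boot_status,
                dmub->preos_info.trace_buffer_size,
                (unsigned long long)dmub->preos_info.trace_buffer_phy_addr);
}
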
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 92248224b713..772e07a1a959 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -485,7 +485,13 @@ union replay_debug_flags {
*/
uint32_t enable_visual_confirm_debug : 1;
- uint32_t reserved : 18;
+ /**
+ * 0x4000 (bit 14)
+ * @debug_log_enabled: Debug Log Enabled
+ */
+ uint32_t debug_log_enabled : 1;
+
+ uint32_t reserved : 17;
} bitfields;
uint32_t u32All;
@@ -629,6 +635,112 @@ struct dmub_visual_confirm_color {
uint16_t panel_inst;
};
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn30_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16;
+ uint32_t reserved1[5];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn401_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16;
+ uint32_t reserved1[4];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload.
+ */
+struct dmub_cursor_offload_pipe_data_v1 {
+ union {
+ struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */
+ struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */
+ uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */
+ };
+};
+
+/**
+ * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data.
+ */
+struct dmub_cursor_offload_payload_data_v1 {
+ uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */
+ uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */
+ uint32_t pipe_mask; /**< Mask of pipes to update. */
+ uint32_t reserved; /**< Reserved for future use. */
+ struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */
+};
+
+/**
+ * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload.
+ */
+struct dmub_cursor_offload_stream_v1 {
+ struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */
+ uint32_t write_idx; /**< The index of the last written payload. */
+};
+
+/**
+ * struct dmub_cursor_offload_v1 - Cursor offload feature state.
+ */
+struct dmub_cursor_offload_v1 {
+ struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */
+};
+
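
The write_idx_start/write_idx_finish pair in dmub_cursor_offload_payload_data_v1 works like a sequence counter: the producer bumps write_idx_start before touching pipe_data and only sets write_idx_finish once the payload is complete, so a reader that sees the two values differ knows the slot is mid-update. A hedged producer-side sketch of that ordering; the helper is hypothetical and real code against firmware-shared memory would also need the appropriate write barriers:

/* Illustrative only; not part of this patch. */
static void example_publish_cursor_payload(struct dmub_cursor_offload_stream_v1 *stream,
                                           const struct dmub_cursor_offload_pipe_data_v1 *pipe,
                                           uint32_t pipe_idx)
{
        uint32_t slot = (stream->write_idx + 1) % 4; /* 4 payload slots per stream */
        struct dmub_cursor_offload_payload_data_v1 *p = &stream->payloads[slot];

        p->write_idx_start++;                     /* mark the slot as in-progress */
        p->pipe_mask = 1u << pipe_idx;            /* pipe_idx < 6 assumed */
        p->pipe_data[pipe_idx] = *pipe;
        p->write_idx_finish = p->write_idx_start; /* mark the slot as consistent */

        stream->write_idx = slot;                 /* publish the newest slot */
}
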
//==============================================================================
//</DMUB_TYPES>=================================================================
//==============================================================================
@@ -648,7 +760,8 @@ struct dmub_visual_confirm_color {
union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
- uint32_t reserved : 31;
+ uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */
+ uint32_t reserved : 30;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -814,6 +927,28 @@ enum dmub_ips_comand_type {
};
/**
+ * enum dmub_cursor_offload_comand_type - Cursor offload subcommands.
+ */
+enum dmub_cursor_offload_comand_type {
+ /**
+ * Initializes the cursor offload feature.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_INIT = 0,
+ /**
+ * Enables cursor offloading for a stream and updates the timing parameters.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1,
+ /**
+ * Disables cursor offloading for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2,
+ /**
+ * Programs the latest data for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3,
+};
+
+/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
union dmub_fw_boot_options {
@@ -844,7 +979,8 @@ union dmub_fw_boot_options {
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
- uint32_t reserved : 5; /**< reserved */
+ uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */
+ uint32_t reserved : 4; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -877,6 +1013,7 @@ enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3,
+ DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
@@ -958,6 +1095,22 @@ struct dmub_shared_state_ips_driver {
}; /* 248-bytes, fixed */
/**
+ * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream header metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_stream_v1 {
+ uint32_t last_write_idx; /**< Last write index */
+ uint8_t reserved[28]; /**< Reserved bytes. */
+}; /* 32-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_v1 {
+ struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */
+ uint8_t reserved[56]; /**< reserved for future use */
+}; /* 248-bytes, fixed */
+
+/**
* enum dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
@@ -983,6 +1136,7 @@ struct dmub_shared_state_feature_block {
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */
+ struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
@@ -1572,6 +1726,19 @@ enum dmub_cmd_type {
*/
DMUB_CMD__IPS = 91,
+ /**
+ * Command type used for Cursor offload.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD = 92,
+
+ /**
+ * Command type used for all SMART_POWER_HDR commands.
+ */
+ DMUB_CMD__SMART_POWER_HDR = 93,
+
+ /**
+ * Command type used for VBIOS shared commands.
+ */
DMUB_CMD__VBIOS = 128,
};
@@ -4238,6 +4405,45 @@ enum replay_enable {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_HDR_ENABLE command.
+ */
+struct dmub_rb_cmd_smart_power_hdr_enable_data {
+ /**
+ * SMART_POWER_HDR enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use.
+ * Currently only instances 0 and 1 are supported.
+ */
+ uint8_t panel_inst;
+
+ uint16_t peak_nits;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ uint8_t debugcontrol;
+ /**
+ * Vertical interrupt trigger line.
+ */
+ uint32_t triggerline;
+
+ uint16_t fixed_max_cll;
+
+ uint8_t pad[2];
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command.
*/
struct dmub_rb_cmd_replay_enable_data {
@@ -4408,9 +4614,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
*/
uint16_t coasting_vtotal_high;
/**
- * Explicit padding to 4 byte boundary.
+ * Frame skip number.
*/
- uint8_t pad[2];
+ uint16_t frame_skip_number;
};
/**
@@ -4571,6 +4777,58 @@ union dmub_replay_cmd_set {
};
/**
+ * SMART POWER HDR command sub-types.
+ */
+enum dmub_cmd_smart_power_hdr_type {
+
+ /**
+ * Enable/Disable SMART_POWER_HDR.
+ */
+ DMUB_CMD__SMART_POWER_HDR_ENABLE = 1,
+ /**
+ * Get current MaxCLL value if SMART POWER HDR is enabled.
+ */
+ DMUB_CMD__SMART_POWER_HDR_GETMAXCLL = 2,
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_HDR_ENABLE command.
+ */
+struct dmub_rb_cmd_smart_power_hdr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_rb_cmd_smart_power_hdr_enable_data data;
+};
+
+struct dmub_cmd_smart_power_hdr_getmaxcll_input {
+ uint8_t panel_inst;
+ uint8_t pad[3];
+};
+
+struct dmub_cmd_smart_power_hdr_getmaxcll_output {
+ uint16_t current_max_cll;
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_HDR_GETMAXCLL command.
+ */
+struct dmub_rb_cmd_smart_power_hdr_getmaxcll {
+ struct dmub_cmd_header header; /**< Command header */
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_HDR_GETMAXCLL command.
+ */
+ union dmub_cmd_smart_power_hdr_getmaxcll_data {
+ struct dmub_cmd_smart_power_hdr_getmaxcll_input input; /**< Input */
+ struct dmub_cmd_smart_power_hdr_getmaxcll_output output; /**< Output */
+ uint32_t output_raw; /**< Raw data output */
+ } data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -4652,6 +4910,7 @@ enum hw_lock_client {
*/
HW_LOCK_CLIENT_REPLAY = 4,
HW_LOCK_CLIENT_FAMS2 = 5,
+ HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6,
/**
* Invalid client.
*/
@@ -6064,6 +6323,40 @@ struct dmub_rb_cmd_ips_query_residency_info {
};
/**
+ * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command.
+ */
+struct dmub_cmd_cursor_offload_init_data {
+ union dmub_addr state_addr; /**< State address for dmub_cursor_offload */
+ uint32_t state_size; /**< State size for dmub_cursor_offload */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_init_data init_data;
+};
+
+/**
+ * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command.
+ */
+struct dmub_cmd_cursor_offload_stream_data {
+ uint32_t otg_inst: 4; /**< OTG instance to control */
+ uint32_t reserved: 28; /**< Reserved for future use */
+ uint32_t line_time_in_ns; /**< Line time in ns for the OTG */
+ uint32_t v_total_max; /**< OTG v_total_max */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_stream_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_stream_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -6392,6 +6685,26 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command.
+ */
+ struct dmub_rb_cmd_cursor_offload_init cursor_offload_init;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD control commands.
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR
+ */
+ struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_HDR_ENABLE command.
+ */
+ struct dmub_rb_cmd_smart_power_hdr_enable smart_power_hdr_enable;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_HDR_GETMAXCLL command.
+ */
+ struct dmub_rb_cmd_smart_power_hdr_getmaxcll smart_power_hdr_getmaxcll;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 4777c7203b2c..cd04d7c756c3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -380,6 +380,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index ce041f6239dc..7e9856289910 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,50 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100000;
- uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0 && is_enabled != 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout; ++i) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(1);
- }
-
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
- udelay(1);
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
if (pwait_mode & (1 << 0))
break;
- udelay(1);
+ udelay(poll_delay_us);
}
- /* Force reset in case we timed out, DMCUB is likely hung. */
}
- if (is_enabled) {
+ if (enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
+ }
+
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -141,7 +149,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
- /* Clear the GPINT command manually so we don't send anything during boot. */
+ /* Clear the GPINT command manually so we don't reset again. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
@@ -163,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -193,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 834e5434ccb8..e13557ed97be 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -418,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -520,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
+
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub)
+{
+ uint64_t region3_cw5_offset;
+ uint32_t top_addr, top_addr_enable, offset_low;
+ uint32_t offset_high, base_addr, fw_version;
+ bool is_vbios_fw = false;
+
+ memset(&dmub->preos_info, 0, sizeof(dmub->preos_info));
+
+ fw_version = REG_READ(DMCUB_SCRATCH1);
+ is_vbios_fw = ((fw_version >> 6) & 0x01) ? true : false;
+ if (!is_vbios_fw)
+ return false;
+
+ dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0);
+ dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1);
+ dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14);
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_ENABLE, &top_addr_enable);
+ if (top_addr_enable) {
+ dmub_dcn35_get_fb_base_offset(dmub,
+ &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset);
+ offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET);
+ offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH);
+ region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low;
+ dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset
+ - dmub->preos_info.fb_base + dmub->preos_info.fb_offset;
+
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr);
+ base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF;
+ dmub->preos_info.trace_buffer_size =
+ (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0;
+ }
+
+ return true;
+}
+
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
/* DMCUB_REGION3_TMR_AXI_SPACE values:
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 39fcb7275da5..92e6695a2c9b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub);
void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN35_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index b31adbd0d685..95542299e3b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
for (; i < timeout_us; i++) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
}
}
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
if (i >= timeout_us) {
/* timeout should never occur */
BREAK_TO_DEBUGGER();
}
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index b17a19400c06..a657efda89ce 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn35_get_current_time;
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
+ funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
@@ -564,10 +565,11 @@ enum dmub_status
window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
- window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64);
window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
+ window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64);
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
- struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
- struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
+ uint32_t i;
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
- ASSERT(0);
- return DMUB_STATUS_INVALID;
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (!params->fb[i]) {
+ ASSERT(0);
+ return DMUB_STATUS_INVALID;
+ }
}
dmub->fb_base = params->fb_base;
@@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->shared_state = shared_state_fb->cpu_addr;
- dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM];
- dmub->ib_mem_gart = *ib_mem_gart;
+ dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD];
+ dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr;
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
@@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
+
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info)
+ return false;
+
+ return dmub->hw_funcs.get_preos_fw_info(dmub);
+}
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 812377d9e48f..973b6bdbac63 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -135,12 +135,8 @@ struct bp_external_encoder_control {
struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
- /* from GPU Tx aka asic_signal */
- enum signal_type signal;
- /* sink_signal may differ from asicSignal if Translator encoder */
enum signal_type sink_signal;
- enum display_output_bit_depth display_output_bit_depth;
- bool enable_dp_audio;
+ uint8_t bit_depth;
};
struct bp_transmitter_control {
@@ -166,6 +162,11 @@ struct bp_transmitter_control {
bool single_pll_mode;
};
+struct bp_load_detection_parameters {
+ enum engine_id engine_id;
+ uint16_t device_id;
+};
+
struct bp_hw_crtc_timing_parameters {
enum controller_id controller_id;
/* horizontal part */
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index cc467031651d..38a77fa9b4af 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -169,6 +169,7 @@ struct dc_firmware_info {
uint32_t engine_clk_ss_percentage;
} feature;
+ uint32_t max_pixel_clock; /* in KHz */
uint32_t default_display_engine_pll_frequency; /* in KHz */
uint32_t external_clock_source_frequency_for_dp; /* in KHz */
uint32_t smu_gpu_pll_output_freq; /* in KHz */
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 54e33062b3c0..1386fa124e85 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -310,4 +310,11 @@ static inline bool dal_graphics_object_id_equal(
}
return false;
}
+
+static inline bool dc_connector_supports_analog(const enum connector_id conn)
+{
+ return conn == CONNECTOR_ID_VGA ||
+ conn == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ conn == CONNECTOR_ID_DUAL_LINK_DVII;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index a10d6b988aab..3a2c2d2fb629 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+/**
+ * dc_is_rgb_signal() - Whether the signal is analog RGB.
+ *
+ * Returns whether the given signal type is an analog RGB signal
+ * that is used with a DAC on VGA or DVI-I connectors.
+ * Not to be confused with other uses of "RGB", such as RGB color space.
+ */
+static inline bool dc_is_rgb_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_RGB);
+}
+
static inline bool dc_is_tmds_signal(enum signal_type signal)
{
switch (signal) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index c760216a6240..ca402ddcdacc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -354,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* reset retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* add display to connection */
@@ -400,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* remove display */
@@ -464,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* set new adjustment */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index a37634942b07..b883d626f1c3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -508,7 +508,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
output->auth_complete = 1;
- mod_hdcp_log_ddc_trace(hdcp);
+ HDCP_AUTH_COMPLETE_TRACE(hdcp);
}
/* connection topology helpers */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 8bc377560787..1bbd728d4345 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ enum mod_hdcp_status status;
u8 bksv[sizeof(n)] = { };
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
@@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
count++;
n &= (n - 1);
}
- return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+
+ if (count == 20) {
+ hdcp->connection.trace.hdcp1.attempt_count++;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+ }
+ return status;
}
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
@@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp);
+
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index bb8ae80b37f8..5628f0ef73fd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
if (is_dp_hdcp(hdcp))
status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
@@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
- status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
- MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi
+ & HDCP_2_2_HDMI_SUPPORT_MASK)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ trace->hdcp2.attempt_count++;
+
return status;
}
@@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
/* Avoid device count == 0 to do authentication */
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ trace->hdcp2.downstream_device_count = get_device_count(hdcp);
+ trace->hdcp2.hdcp1_device_downstream =
+ HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
+ trace->hdcp2.hdcp2_legacy_device_downstream =
+ HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 1d83c1b9da10..26553aa4c5ca 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -31,6 +31,7 @@
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#define HDCP_LOG_TRA(hdcp) do {} while (0)
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
@@ -131,4 +132,9 @@
HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
} while (0)
+#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \
+ mod_hdcp_log_ddc_trace(hdcp); \
+ HDCP_LOG_TRA(hdcp); \
+} while (0)
+
#endif // MOD_HDCP_LOG_H_
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index b51ddf2846df..46e52fb3a118 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -230,9 +230,23 @@ struct mod_hdcp_error {
uint8_t state_id;
};
+struct mod_hdcp1_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+};
+
+struct mod_hdcp2_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+ uint8_t hdcp1_device_downstream;
+ uint8_t hdcp2_legacy_device_downstream;
+};
+
struct mod_hdcp_trace {
struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
uint8_t error_count;
+ struct mod_hdcp1_trace hdcp1;
+ struct mod_hdcp2_trace hdcp2;
};
enum mod_hdcp_encryption_status {
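
The new mod_hdcp1_trace/mod_hdcp2_trace members are filled in by the hdcp1/hdcp2 execution hunks above (attempt counts in validate_bksv() and check_hdcp2_capable(), downstream topology in the check_device_count() variants). A hedged sketch of how a caller might report them, using the existing HDCP_LOG_TOP macro; the helper itself is hypothetical:

/* Illustrative only; not part of this patch. */
static void example_log_hdcp_trace(struct mod_hdcp *hdcp)
{
        struct mod_hdcp_trace *trace = &hdcp->connection.trace;

        HDCP_LOG_TOP(hdcp, "hdcp1: %u attempts, %u downstream devices",
                     trace->hdcp1.attempt_count,
                     trace->hdcp1.downstream_device_count);
        HDCP_LOG_TOP(hdcp, "hdcp2: %u attempts, %u downstream devices (hdcp1 ds %u, hdcp2.0 legacy ds %u)",
                     trace->hdcp2.attempt_count,
                     trace->hdcp2.downstream_device_count,
                     trace->hdcp2.hdcp1_device_downstream,
                     trace->hdcp2.hdcp2_legacy_device_downstream);
}
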
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
index 086869264425..a252ee4c7874 100644
--- a/drivers/gpu/drm/amd/include/amd_cper.h
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 75efda2969cf..17945094a138 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VPE,
AMD_IP_BLOCK_TYPE_UMSCH_MM,
AMD_IP_BLOCK_TYPE_ISP,
+ AMD_IP_BLOCK_TYPE_RAS,
AMD_IP_BLOCK_TYPE_NUM,
};
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2b0cdb2a2775..f92f78d5d330 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -454,7 +454,7 @@ struct amd_pm_funcs {
bool gate,
int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
- int (*set_power_limit)(void *handle, uint32_t n);
+ int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
@@ -532,6 +532,110 @@ struct metrics_table_header {
uint8_t content_revision;
};
+enum amdgpu_metrics_attr_id {
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC,
+ AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH,
+ AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST,
+ AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK,
+ AMDGPU_METRICS_ATTR_ID_NUM_PARTITION,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST,
+ AMDGPU_METRICS_ATTR_ID_JPEG_BUSY,
+ AMDGPU_METRICS_ATTR_ID_VCN_BUSY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_MAX,
+};
+
+enum amdgpu_metrics_attr_type {
+ AMDGPU_METRICS_TYPE_U8,
+ AMDGPU_METRICS_TYPE_S8,
+ AMDGPU_METRICS_TYPE_U16,
+ AMDGPU_METRICS_TYPE_S16,
+ AMDGPU_METRICS_TYPE_U32,
+ AMDGPU_METRICS_TYPE_S32,
+ AMDGPU_METRICS_TYPE_U64,
+ AMDGPU_METRICS_TYPE_S64,
+ AMDGPU_METRICS_TYPE_MAX,
+};
+
+enum amdgpu_metrics_attr_unit {
+ /* None */
+ AMDGPU_METRICS_UNIT_NONE,
+ /* MHz */
+ AMDGPU_METRICS_UNIT_CLOCK_1,
+ /* Degree Celsius */
+ AMDGPU_METRICS_UNIT_TEMP_1,
+ /* Watts */
+ AMDGPU_METRICS_UNIT_POWER_1,
+ /* In nanoseconds */
+ AMDGPU_METRICS_UNIT_TIME_1,
+ /* In 10 nanoseconds */
+ AMDGPU_METRICS_UNIT_TIME_2,
+ /* Speed in GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_1,
+ /* Speed in 0.1 GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_2,
+ /* Bandwidth GB/s */
+ AMDGPU_METRICS_UNIT_BW_1,
+ /* Data in KB */
+ AMDGPU_METRICS_UNIT_DATA_1,
+ /* Percentage */
+ AMDGPU_METRICS_UNIT_PERCENT,
+ AMDGPU_METRICS_UNIT_MAX,
+};
+
+#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000
+#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24
+#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000
+#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20
+#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00
+#define AMDGPU_METRICS_ATTR_ID_SHIFT 10
+#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF
+#define AMDGPU_METRICS_ATTR_INST_SHIFT 0
+
+#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \
+ (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \
+ ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \
+ ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst))
+
/*
* gpu_metrics_v1_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v1_1 or later instead.
@@ -1221,6 +1325,19 @@ struct gpu_metrics_v1_8 {
uint32_t pcie_lc_perf_other_end_recovery;
};
+struct gpu_metrics_attr {
+ /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */
+ uint64_t attr_encoding;
+ /* Attribute value, depends on attr_encoding */
+ void *attr_value;
+};
+
+struct gpu_metrics_v1_9 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
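
AMDGPU_METRICS_ENC_ATTR packs the unit, value type, attribute id and instance of a gpu_metrics_v1_9 entry into attr_encoding using the shift/mask pairs defined above. A small hedged sketch of encoding one attribute and decoding the fields back out; the helper names are made up for the example:

/* Illustrative only; not part of this patch. */
static uint64_t example_encode_hotspot_temp(void)
{
        /* hotspot temperature, degrees Celsius, 16-bit unsigned, instance 0 */
        return AMDGPU_METRICS_ENC_ATTR(AMDGPU_METRICS_UNIT_TEMP_1,
                                       AMDGPU_METRICS_TYPE_U16,
                                       AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
                                       0);
}

static void example_decode_attr(uint64_t enc, uint32_t *unit, uint32_t *type,
                                uint32_t *id, uint32_t *inst)
{
        *unit = (enc & AMDGPU_METRICS_ATTR_UNIT_MASK) >> AMDGPU_METRICS_ATTR_UNIT_SHIFT;
        *type = (enc & AMDGPU_METRICS_ATTR_TYPE_MASK) >> AMDGPU_METRICS_ATTR_TYPE_SHIFT;
        *id   = (enc & AMDGPU_METRICS_ATTR_ID_MASK)   >> AMDGPU_METRICS_ATTR_ID_SHIFT;
        *inst = (enc & AMDGPU_METRICS_ATTR_INST_MASK) >> AMDGPU_METRICS_ATTR_INST_SHIFT;
}
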
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index ab1cfc92dbeb..f9629d42ada2 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -345,7 +345,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 69611c7e30e3..2f12cba4eb66 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -399,7 +399,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 518d07afc7df..5d08dc3b7110 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -1616,6 +1616,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit_type,
uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1626,7 +1627,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
- limit);
+ limit_type, limit);
mutex_unlock(&adev->pm.mutex);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b5fbb0fd1dc0..c88a76cce401 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -108,8 +108,9 @@ const char * const amdgpu_pp_profile_name[] = {
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
bool runpm_check = runpm ? adev->in_runpm : false;
+ bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
- if (amdgpu_in_reset(adev))
+ if (amdgpu_in_reset(adev) || !full_init)
return -EBUSY;
if (adev->in_suspend && !runpm_check)
@@ -173,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
*/
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
- pm_runtime_mark_last_busy(adev->dev);
pm_runtime_put_autosuspend(adev->dev);
}
@@ -3390,13 +3390,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
- value |= limit_type << 24;
err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
- err = amdgpu_dpm_set_power_limit(adev, value);
+ err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
amdgpu_pm_put_access(adev);
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65c1d98af26c..3bce74f8bb0a 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -553,7 +553,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
- uint32_t limit);
+ uint32_t limit_type, uint32_t limit);
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 554492dfa3c0..76a5353d7f4a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -28,12 +27,10 @@
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
-#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
-#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -955,7 +952,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-static int pp_set_power_limit(void *handle, uint32_t limit)
+static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t max_power_limit;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index d2dbd90bb427..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 1f50f1e74c48..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fb8086859857..4317da6f7c38 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu,
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
-static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
@@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
/* Enable restore flag */
smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
- /* set the user dpm power limit */
- if (smu->user_dpm_profile.power_limit) {
- ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ /* set the user dpm power limits */
+ for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
+ if (!smu->user_dpm_profile.power_limits[i])
+ continue;
+ ret = smu_set_power_limit(smu, i,
+ smu->user_dpm_profile.power_limits[i]);
if (ret)
- dev_err(smu->adev->dev, "Failed to set power limit value\n");
+ dev_err(smu->adev->dev, "Failed to set power limit for limit type %d\n", i);
}
/* set the user dpm clock configurations */
@@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
return true;
}
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
+ ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);
+
+ return ret;
+}
static int smu_sys_get_pp_table(void *handle,
char **table)
@@ -2225,7 +2239,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2257,18 +2270,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
- if (smu->current_power_limit) {
- ret = smu_set_power_limit(smu, smu->current_power_limit);
- if (ret && ret != -EOPNOTSUPP)
- return ret;
- }
-
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
- ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
- if (ret)
- return ret;
- }
-
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -2958,37 +2959,34 @@ int smu_get_power_limit(void *handle,
return ret;
}
-static int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct smu_context *smu = handle;
- uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- limit &= (1<<24)-1;
- if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit)
- return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
-
- if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
- dev_err(smu->adev->dev,
- "New power limit (%d) is out of range [%d,%d]\n",
- limit, smu->min_power_limit, smu->max_power_limit);
- return -EINVAL;
+ if (limit_type == SMU_DEFAULT_PPT_LIMIT) {
+ if (!limit)
+ limit = smu->current_power_limit;
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
+ return -EINVAL;
+ }
}
- if (!limit)
- limit = smu->current_power_limit;
-
if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.power_limit = limit;
+ if (ret)
+ return ret;
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ smu->user_dpm_profile.power_limits[limit_type] = limit;
}
- return ret;
+ return 0;
}
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
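The hunk above retires the old convention in which callers packed the PPT limit type into the upper byte of the 32-bit limit argument (the removed `limit >> 24` and 24-bit mask); the type is now an explicit parameter. A standalone sketch of the retired encoding, with illustrative values only and the two macros mirroring the enum added below (SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT = 1):

    #include <stdint.h>
    #include <stdio.h>

    #define SMU_DEFAULT_PPT_LIMIT 0
    #define SMU_FAST_PPT_LIMIT    1

    int main(void)
    {
            uint32_t value  = 95;                                  /* requested limit            */
            uint32_t packed = (SMU_FAST_PPT_LIMIT << 24) | value;  /* old single packed argument */

            /* What the removed lines in smu_set_power_limit() used to recover: */
            uint32_t limit_type = packed >> 24;
            uint32_t limit      = packed & ((1u << 24) - 1);

            printf("type=%u limit=%u\n", limit_type, limit);       /* type=1 limit=95 */

            /* With the new signature the caller passes the type directly,
             * e.g. smu_set_power_limit(handle, SMU_FAST_PPT_LIMIT, 95),
             * and no shifting or masking is needed.
             */
            return 0;
    }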
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 582c186d8b62..c48028abc8c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -212,6 +212,7 @@ enum smu_power_src_type {
enum smu_ppt_limit_type {
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
+ SMU_LIMIT_TYPE_COUNT,
};
enum smu_ppt_limit_level {
@@ -231,7 +232,7 @@ enum smu_memory_pool_size {
struct smu_user_dpm_profile {
uint32_t fan_mode;
- uint32_t power_limit;
+ uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];
uint32_t fan_speed_pwm;
uint32_t fan_speed_rpm;
uint32_t flags;
@@ -1521,6 +1522,15 @@ struct pptable_funcs {
*/
ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
void *table);
+ /**
+ * @ras_send_msg: Send a message with a parameter from RAS
+ * &msg: Type of message.
+ * &param: Message parameter.
+ * &read_arg: SMU response (optional).
+ */
+ int (*ras_send_msg)(struct smu_context *smu,
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
};
typedef enum {
@@ -1786,6 +1796,8 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg);
#endif
void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 9548bd3c624b..55401e6b2b0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
- int ret = 0, size = 0;
+ int ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0;
int i;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
}
- return size;
+ return size - start_offset;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
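This start_offset pattern, repeated for every print_clk_levels() implementation in the hunks below, exists because smu_cmn_get_sysfs_buf() may seed `size` with a non-zero offset into the sysfs page, so returning `size - start_offset` reports only the bytes this function itself emitted. A standalone sketch of the accounting (the helper and the emit calls are simulated here, not the real kernel API):

    #include <stdio.h>

    /* Simulated accounting: `seeded` plays the role of the offset that
     * smu_cmn_get_sysfs_buf() writes into `size` before any output happens.
     */
    static int print_levels(int seeded)
    {
            int size = seeded;            /* as left by the helper                  */
            int start_offset = size;      /* remember where this function started   */

            size += 12;                   /* pretend sysfs_emit_at() added 12 bytes */
            size += 20;                   /* ...and 20 more                         */

            return size - start_offset;   /* bytes emitted by this call only        */
    }

    int main(void)
    {
            printf("%d\n", print_levels(37));   /* prints 32, not 69 */
            return 0;
    }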
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 0028f10ead42..bbf09aec9152 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, levels, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_DCEFCLK:
ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
- return size;
+ return size - start_offset;
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
@@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
- return size;
+ return size - start_offset;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
@@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
} else {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
- return size;
+ return size - start_offset;
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
@@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int navi10_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 31c2c0386b1f..774283ac7827 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
- int i, size = 0, ret = 0;
+ int i, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2c9869feba61..53579208cffb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_print_clk_levels(struct smu_context *smu,
@@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_common_print_clk_levels(struct smu_context *smu,
@@ -2308,8 +2310,7 @@ static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
uint32_t ppt_limit;
int ret = 0;
@@ -2345,12 +2346,11 @@ static int vangogh_get_power_limit(struct smu_context *smu,
}
static int vangogh_get_ppt_limit(struct smu_context *smu,
- uint32_t *ppt_limit,
- enum smu_ppt_limit_type type,
- enum smu_ppt_limit_level level)
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
if (!power_context)
return -EOPNOTSUPP;
@@ -2399,7 +2399,6 @@ static int vangogh_set_power_limit(struct smu_context *smu,
smu->current_power_limit = ppt_limit;
break;
case SMU_FAST_PPT_LIMIT:
- ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
if (ppt_limit > power_context->max_fast_ppt_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 3baf20f4c373..eaa9ea162f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
bool cur_value_match_level = false;
@@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_RANGE:
@@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
}
- return size;
+ return size - start_offset;
case SMU_SOCCLK:
count = NUM_SOCCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
@@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index c1062e5f0393..677781060246 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index b081ae3e8f43..6908f9930f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index f5db181ef489..4576bf008b22 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -861,11 +861,12 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 285cf7979693..0a7d2cea7dc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -450,7 +450,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
- if ((pgm == 0) && (fw_ver >= 0x00558200))
+ if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -1428,7 +1429,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int now, size = 0;
+ int now, size = 0, start_offset = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_13_0_dpm_table *single_dpm_table;
@@ -1437,10 +1438,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
dpm_context = smu_dpm->dpm_context;
@@ -1575,7 +1577,7 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
@@ -3226,6 +3228,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ int ret;
+
+ switch (msg) {
+ case SMU_MSG_QueryValidMcaCount:
+ case SMU_MSG_QueryValidMcaCeCount:
+ case SMU_MSG_McaBankDumpDW:
+ case SMU_MSG_McaBankCeDumpDW:
+ case SMU_MSG_ClearMcaOnRead:
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
+ break;
+ default:
+ ret = -EPERM;
+ }
+
+ return ret;
+}
static int smu_v13_0_6_post_init(struct smu_context *smu)
{
@@ -3921,6 +3941,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.reset_sdma = smu_v13_0_6_reset_sdma,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
.post_init = smu_v13_0_6_post_init,
+ .ras_send_msg = smu_v13_0_6_ras_send_msg,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
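As a usage sketch (not part of this patch), RAS code could reach this hook through the amdgpu_smu_ras_send_msg() wrapper added to amdgpu_smu.c earlier in this series; smu_v13_0_6_ras_send_msg() forwards only the whitelisted MCA query/dump messages and rejects anything else with -EPERM, while ASICs without a ->ras_send_msg callback report -EOPNOTSUPP. Kernel-style sketch, not a buildable unit; the parameter value 0 is a placeholder:

    /* Hypothetical caller: query the number of valid MCA banks via the new hook. */
    static int ras_query_valid_mca_count(struct amdgpu_device *adev, uint32_t *count)
    {
            /* 0 is a placeholder parameter; the real argument depends on the
             * firmware message definition.
             */
            return amdgpu_smu_ras_send_msg(adev, SMU_MSG_QueryValidMcaCount, 0, count);
    }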
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c96fa5e49ed6..a3fc35b9011e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 73b4506ef5a8..5d7e671fa3c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
uint32_t clk_limit = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int yellow_carp_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index fe00c84b1cc6..b1bd946d8e30 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, ret = 0, size = 0;
+ int i, idx, ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 086501cc5213..2cea688c604f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f532f7c69259..a8961a8f5c42 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile
new file mode 100644
index 000000000000..bbdaba811d34
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+ifeq ($(AMD_GPU_RAS_MGR),)
+ AMD_GPU_RAS_MGR := ras_mgr
+endif
+
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR)
+
+RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore
+
+AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS)))
+
+include $(AMD_RAS)
+
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
new file mode 100644
index 000000000000..5e5a2cfa4068
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
@@ -0,0 +1,33 @@
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+RAS_MGR_FILES = amdgpu_ras_sys.o \
+ amdgpu_ras_mgr.o \
+ amdgpu_ras_eeprom_i2c.o \
+ amdgpu_ras_mp1_v13_0.o \
+ amdgpu_ras_cmd.o \
+ amdgpu_ras_process.o \
+ amdgpu_ras_nbio_v7_9.o
+
+RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_MGR)
+
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
new file mode 100644
index 000000000000..78419b7f7729
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_mgr.h"
+
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+#define AMDGPU_RAS_TYPE_RASCORE 0x1
+#define AMDGPU_RAS_TYPE_AMDGPU 0x2
+#define AMDGPU_RAS_TYPE_VF 0x3
+
+static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ RAS_DEV_WARN(adev, "Failed to disallow df cstate");
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to disallow XGMI power down");
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ RAS_DEV_WARN(adev, "Failed to allow df cstate");
+ }
+
+ return 0;
+}
+
+static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core,
+ uint64_t addr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
+
+static int amdgpu_ras_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (req->block_id == RAS_BLOCK_ID__UMC) {
+ if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ req->address);
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ if ((req->address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.",
+ req->address);
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+ }
+
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ req->block_id != RAS_BLOCK_ID__GFX) {
+ req->address = local_addr_to_xgmi_global_addr(ras_core, req->address);
+ }
+ }
+
+ amdgpu_ras_trigger_error_prepare(ras_core, req);
+ ret = rascore_handle_cmd(ras_core, cmd, data);
+ amdgpu_ras_trigger_error_end(ras_core, req);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_dev_handle *input_data =
+ (struct ras_cmd_dev_handle *)cmd->input_buff_raw;
+ struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges =
+ (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw;
+ struct amdgpu_mem_partition_info *mem_ranges;
+ uint32_t i = 0;
+
+ if (cmd->input_size != sizeof(*input_data))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ mem_ranges = adev->gmc.mem_partitions;
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT;
+ ranges->range[i].size = mem_ranges[i].size;
+ ranges->range[i].idx = i;
+ }
+
+ ranges->num_ranges = adev->gmc.num_mem_partitions;
+
+ ranges->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_translate_fb_address(struct ras_core_context *ras_core,
+ enum ras_fb_addr_type src_type,
+ enum ras_fb_addr_type dest_type,
+ union ras_translate_fb_address *src_addr,
+ union ras_translate_fb_address *dest_addr)
+{
+ uint64_t soc_phy_addr;
+ int ret = RAS_CMD__SUCCESS;
+
+ /* Does not need to be queued as event as this is a SW translation */
+ switch (src_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ soc_phy_addr = src_addr->soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_bank_to_soc_pa(ras_core,
+ src_addr->bank_addr, &soc_phy_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ switch (dest_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ dest_addr->soc_phy_addr = soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_soc_pa_to_bank(ras_core,
+ soc_phy_addr, &dest_addr->bank_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_translate_fb_address_req *req_buff =
+ (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw;
+ struct ras_cmd_translate_fb_address_rsp *rsp_buff =
+ (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) ||
+ (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type,
+ req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ rsp_buff->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error},
+ {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges},
+ {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address},
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i, res;
+
+ for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) {
+ if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &amdgpu_ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (ras_cmd)
+ res = ras_cmd->func(ras_core, cmd, NULL);
+ else
+ res = RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return res;
+}
+
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd)
+{
+ struct ras_core_context *cmd_core = ras_core;
+ int timeout = 60;
+ int res;
+
+ cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD;
+ cmd->output_size = 0;
+
+ if (!ras_core_is_enabled(cmd_core))
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+
+ while (ras_core_gpu_in_reset(cmd_core)) {
+ msleep(1000);
+ if (!timeout--)
+ return RAS_CMD__ERROR_TIMEOUT;
+ }
+
+ res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL);
+ if (res == RAS_CMD__ERROR_UKNOWN_CMD)
+ res = rascore_handle_cmd(cmd_core, cmd, NULL);
+
+ cmd->cmd_res = res;
+
+ if (cmd->output_size > cmd->output_buf_size) {
+ RAS_DEV_ERR(cmd_core->dev,
+ "Output size 0x%x exceeds output buffer size 0x%x!\n",
+ cmd->output_size, cmd->output_buf_size);
+ return RAS_CMD__SUCCESS_EXEED_BUFFER;
+ }
+
+ return RAS_CMD__SUCCESS;
+}
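A hedged sketch of how a caller might drive the command path above, using the RAS_CMD__TRANSLATE_FB_ADDRESS handler as the example. The layout of struct ras_cmd_ctx beyond the fields referenced above (cmd_id, input_buff_raw, input_size, output_buff_raw, output_buf_size) is assumed here, in particular that the buffer members are plain pointers rather than embedded arrays:

    /* Illustrative only; field initialization assumes pointer-typed buffers. */
    static int ras_soc_pa_to_bank(struct ras_core_context *ras_core, uint64_t soc_pa)
    {
            struct ras_cmd_translate_fb_address_req req = {
                    .src_addr_type  = RAS_FB_ADDR_SOC_PHY,
                    .dest_addr_type = RAS_FB_ADDR_BANK,
            };
            struct ras_cmd_translate_fb_address_rsp rsp = { 0 };
            struct ras_cmd_ctx cmd = {
                    .cmd_id          = RAS_CMD__TRANSLATE_FB_ADDRESS,
                    .input_buff_raw  = &req,
                    .input_size      = sizeof(req),
                    .output_buff_raw = &rsp,
                    .output_buf_size = sizeof(rsp),
            };
            int res;

            req.trans_addr.soc_phy_addr = soc_pa;

            res = amdgpu_ras_submit_cmd(ras_core, &cmd);
            if (res != RAS_CMD__SUCCESS)
                    return res;

            /* rsp.trans_addr.bank_addr now holds the translated address. */
            return 0;
    }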
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
new file mode 100644
index 000000000000..5973b156cc85
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_CMD_H__
+#define __AMDGPU_RAS_CMD_H__
+#include "ras.h"
+
+enum amdgpu_ras_cmd_id {
+ RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START,
+ RAS_CMD__TRANSLATE_MEMORY_FD,
+ RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END,
+};
+
+struct ras_cmd_translate_memory_fd_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t type;
+ uint32_t fd;
+ uint64_t address;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_translate_memory_fd_rsp {
+ uint32_t version;
+ uint32_t padding;
+ uint64_t start;
+ uint64_t size;
+ uint32_t reserved[2];
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
new file mode 100644
index 000000000000..1bb7b7001ec7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "ras_eeprom.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
+#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control))
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+static int ras_eeprom_i2c_config(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ u8 i2c_addr;
+
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+ return 0;
+ }
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 14):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return 0;
+ default:
+ return -ENODATA;
+ }
+ return -ENODATA;
+}
+
+static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter;
+ u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .flags = 0,
+ .len = EEPROM_OFFSET_SIZE,
+ .buf = eeprom_offset_buf,
+ },
+ {
+ .flags = read ? I2C_M_RD : 0,
+ },
+ };
+ const u8 *p = eeprom_buf;
+ int r;
+ u16 len;
+
+ for (r = 0; buf_size > 0;
+ buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+ /* Set the EEPROM address we want to write to/read from.
+ */
+ msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+ msgs[1].addr = msgs[0].addr;
+ msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+ msgs[0].buf[1] = eeprom_addr & 0xff;
+
+ if (!read) {
+ /* Write the maximum amount of data, without
+ * crossing the device's page boundary, as per
+ * its spec. Partial page writes are allowed,
+ * starting at any location within the page,
+ * so long as the page boundary isn't crossed
+ * over (actually the page pointer rolls
+ * over).
+ *
+ * As per the AT24CM02 EEPROM spec, after
+ * writing into a page, the I2C driver should
+ * terminate the transfer, i.e. in
+ * "i2c_transfer()" below, with a STOP
+ * condition, so that the self-timed write
+ * cycle begins. This is implied for the
+ * "i2c_transfer()" abstraction.
+ */
+ len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK),
+ buf_size);
+ } else {
+ /* Reading from the EEPROM has no limitation
+ * on the number of bytes read from the EEPROM
+ * device--they are simply sequenced out.
+ * Keep in mind that i2c_msg.len is u16 type.
+ */
+ len = min(U16_MAX, buf_size);
+ }
+ msgs[1].len = len;
+ msgs[1].buf = eeprom_buf;
+
+ /* This constitutes a START-STOP transaction.
+ */
+ r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+ if (r != ARRAY_SIZE(msgs))
+ break;
+
+ if (!read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+ }
+
+ return r < 0 ? r : eeprom_buf - p;
+}
+
+const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = {
+ .eeprom_i2c_xfer = ras_eeprom_i2c_xfer,
+ .update_eeprom_i2c_config = ras_eeprom_i2c_config,
+};
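A standalone worked example of the addressing rules documented above: the top bits of the 19-bit EEPROM offset select the 7-bit I2C device address, and writes are chunked so they never cross a 256-byte page (illustrative program, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define EEPROM_PAGE_SIZE   256u
    #define EEPROM_PAGE_MASK   (EEPROM_PAGE_SIZE - 1)
    #define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))

    int main(void)
    {
            uint32_t eeprom_addr = 0x40000 + 250;   /* RAS table segment + 250 bytes */
            uint32_t buf_size = 100;
            uint32_t len;

            /* Top bits of the 19-bit offset land in the device address: 0x54 */
            printf("i2c addr: 0x%02x\n", MAKE_I2C_ADDR(eeprom_addr));

            /* The first write chunk stops at the 256-byte page boundary: 6 bytes;
             * the remaining 94 bytes go out in the next transfer.
             */
            len = EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK);
            if (len > buf_size)
                    len = buf_size;
            printf("first chunk: %u bytes\n", len);
            return 0;
    }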
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
new file mode 100644
index 000000000000..3b5878605411
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_EEPROM_I2C_H__
+#define __AMDGPU_RAS_EEPROM_I2C_H__
+#include "ras.h"
+
+extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
new file mode 100644
index 000000000000..8007e49951d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_process.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+
+/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
+#define ESTIMATE_BAD_PAGE_THRESHOLD(size) ((size)/(100 * 1024 * 1024ULL))
+
+#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4)
+
+/* Reserve 8 physical dram row for possible retirement.
+ * In worst cases, it will lose 8 * 2MB memory in vram domain
+ */
+#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
+
+static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr)
+{
+ struct ras_event_state *event_state;
+ int i;
+
+ memset(mgr, 0, sizeof(*mgr));
+ atomic64_set(&mgr->seqno, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
+ event_state = &mgr->event_state[i];
+ event_state->last_seqno = RAS_EVENT_INVALID_ID;
+ atomic64_set(&event_state->count, 0);
+ }
+}
+
+static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_reset_in_recovery(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_mgr_init_event_mgr(event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_aca_config *aca_cfg = &config->aca_cfg;
+
+ aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE;
+ aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET;
+ aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg;
+
+ eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func;
+ eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus;
+ if (eeprom_cfg->eeprom_i2c_adapter) {
+ const struct i2c_adapter_quirks *quirks =
+ ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks;
+
+ if (quirks) {
+ eeprom_cfg->max_i2c_read_len = quirks->max_read_len;
+ eeprom_cfg->max_i2c_write_len = quirks->max_write_len;
+ }
+ }
+
+ /*
+ * amdgpu_bad_page_threshold is used to config
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to default value
+ * Driver will issue a warning message when threshold is reached
+ * and continue runtime services.
+ * 0: Disable bad page retirement
+ * Driver will not retire bad pages
+ * which is intended for debugging purpose.
+ * -2: Threshold is determined by a formula
+ * that assumes 1 bad page per 100M of local memory.
+ * Driver will continue runtime services when threshold is reached.
+ * 0 < threshold < max number of bad page records in EEPROM,
+ * A user-defined threshold is set
+ * Driver will halt runtime services when this custom threshold is reached.
+ */
+ if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ ESTIMATE_BAD_PAGE_THRESHOLD(adev->gmc.mc_vram_size);
+ else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT);
+ else
+ eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold;
+
+ eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_mp1_config *mp1_cfg = &config->mp1_cfg;
+ int ret = 0;
+
+ switch (config->mp1_ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "The mp1(0x%x) ras config is not right!\n",
+ config->mp1_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_nbio_config *nbio_cfg = &config->nbio_cfg;
+ int ret = 0;
+
+ switch (config->nbio_ip_version) {
+ case IP_VERSION(7, 9, 0):
+ nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "The nbio(0x%x) ras config is not right!\n",
+ config->nbio_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ta_context *context = &adev->psp.ras_context.context;
+
+ status->initialized = context->initialized;
+ status->session_id = context->session_id;
+ status->psp_cmd_mutex = &adev->psp.mutex;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t nps_mode;
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_ta_param->poison_mode_en = 1;
+
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ ras_ta_param->dgpu_mode = 1;
+
+ ras_ta_param->xcc_mask = adev->gfx.xcc_mask;
+ ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+
+ ras_ta_param->active_umc_mask = adev->umc.active_mask;
+
+ if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode))
+ ras_ta_param->nps_mode = nps_mode;
+
+ return 0;
+}
+
+const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = {
+ .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status,
+ .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param,
+};
+
+static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_psp_config *psp_cfg = &config->psp_cfg;
+
+ psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_umc_config *umc_cfg = &config->umc_cfg;
+
+ umc_cfg->umc_vram_type = adev->gmc.vram_type;
+
+ return 0;
+}
+
+static struct ras_core_context *amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev)
+{
+ struct ras_core_config init_config;
+
+ memset(&init_config, 0, sizeof(init_config));
+
+ init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0);
+ init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
+ init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0);
+ init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+
+ if (init_config.umc_ip_version == IP_VERSION(12, 0, 0))
+ init_config.aca_ip_version = IP_VERSION(1, 0, 0);
+
+ init_config.sys_fn = &amdgpu_ras_sys_fn;
+ init_config.ras_eeprom_supported = true;
+ init_config.poison_supported =
+ amdgpu_ras_is_poison_mode_supported(adev);
+
+ amdgpu_ras_mgr_init_aca_config(adev, &init_config);
+ amdgpu_ras_mgr_init_eeprom_config(adev, &init_config);
+ amdgpu_ras_mgr_init_mp1_config(adev, &init_config);
+ amdgpu_ras_mgr_init_nbio_config(adev, &init_config);
+ amdgpu_ras_mgr_init_psp_config(adev, &init_config);
+ amdgpu_ras_mgr_init_umc_config(adev, &init_config);
+
+ return ras_core_create(&init_config);
+}
+
+static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr;
+ int ret = 0;
+
+ ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL);
+ if (!ras_mgr)
+ return -ENOMEM;
+
+ con->ras_mgr = ras_mgr;
+ ras_mgr->adev = adev;
+
+ ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev);
+ if (!ras_mgr->ras_core) {
+ RAS_DEV_ERR(adev, "Failed to create ras core!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ras_mgr->ras_core->dev = adev;
+
+ amdgpu_ras_process_init(adev);
+ ras_core_sw_init(ras_mgr->ras_core);
+ amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core);
+ return 0;
+
+err:
+ kfree(ras_mgr);
+ return ret;
+}
+
+static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr;
+
+ if (!ras_mgr)
+ return 0;
+
+ amdgpu_ras_process_fini(adev);
+ ras_core_sw_fini(ras_mgr->ras_core);
+ ras_core_destroy(ras_mgr->ras_core);
+ ras_mgr->ras_core = NULL;
+
+ kfree(con->ras_mgr);
+ con->ras_mgr = NULL;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+
+ /* Currently only debug mode can enable the RAS module */
+ if (!adev->debug_enable_ras_aca)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ret = ras_core_hw_init(ras_mgr->ras_core);
+ if (ret) {
+ RAS_DEV_ERR(adev, "Failed to initialize ras core!\n");
+ return ret;
+ }
+
+ ras_mgr->ras_is_ready = true;
+
+ amdgpu_enable_uniras(adev, true);
+
+ RAS_DEV_INFO(adev, "AMDGPU RAS Is Ready.\n");
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ /* Currently only debug mode can enable the RAS module */
+ if (!adev->debug_enable_ras_aca)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ras_core_hw_fini(ras_mgr->ras_core);
+
+ ras_mgr->ras_is_ready = false;
+
+ return 0;
+}
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev)
+{
+ if (!adev || !adev->psp.ras_context.ras)
+ return NULL;
+
+ return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr;
+}
+
+static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = {
+ .name = "ras_v1_0",
+ .sw_init = amdgpu_ras_mgr_sw_init,
+ .sw_fini = amdgpu_ras_mgr_sw_fini,
+ .hw_init = amdgpu_ras_mgr_hw_init,
+ .hw_fini = amdgpu_ras_mgr_hw_fini,
+};
+
+const struct amdgpu_ip_block_version ras_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_RAS,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ras_v1_0_ip_funcs,
+};
+
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EPERM;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EPERM;
+
+ RAS_DEV_INFO(adev, "Enable amdgpu unified ras!");
+ return ras_core_set_status(ras_mgr->ras_core, enable);
+}
+
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return ras_core_is_enabled(ras_mgr->ras_core);
+}
+
+static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready &&
+ ras_core_is_ready(ras_mgr->ras_core))
+ return true;
+
+ return false;
+}
+
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_handle_nbio_irq(ras_mgr->ras_core, data);
+}
+
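+/* Generate a new RAS event sequence number of the given type. For deferred
+ * error and poison consumption events the seqno is also queued in the RAS
+ * core so that the later consumption handler can fetch and correlate it.
+ */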
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+ uint64_t seq_no;
+
+ if (!amdgpu_ras_mgr_is_ready(adev) ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return 0;
+
+ seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type);
+
+ if ((seqno_type == RAS_SEQNO_TYPE_DE) ||
+ (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) {
+ ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no);
+ if (ret)
+ RAS_DEV_WARN(adev, "There are too many ras interrupts!");
+ }
+
+ return seq_no;
+}
+
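+/* Entry point for RAS controller interrupts. UMC interrupts are queued for
+ * bad page handling (and announced as poison creation when poison mode is
+ * supported); interrupts from other blocks are treated as unexpected errors
+ * and trigger a mode-1 reset when poison mode is supported, otherwise only
+ * a warning is printed.
+ */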
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ uint64_t seq_no = 0;
+ int ret = 0;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) {
+ if (ras_mgr->ras_core->poison_supported) {
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE);
+ RAS_DEV_INFO(adev,
+ "{%llu} RAS poison is created, no user action is needed.\n",
+ seq_no);
+ }
+
+ ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info);
+ } else if (ras_mgr->ras_core->poison_supported) {
+ ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info);
+ } else {
+ RAS_DEV_WARN(adev,
+ "No RAS interrupt handler for non-UMC block with poison disabled.\n");
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return amdgpu_ras_process_handle_consumption_interrupt(adev, data);
+}
+
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_update_ecc_info(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ con->gpu_reset_flags |= flags;
+ return amdgpu_ras_reset_gpu(adev);
+}
+
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_eeprom_check_safety_watermark(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev,
+ uint32_t *nps_mode)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ uint32_t mode;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EINVAL;
+
+ mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core);
+ if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE)
+ return -EINVAL;
+
+ *nps_mode = mode;
+
+ return 0;
+}
+
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_umc_check_retired_addr(ras_mgr->ras_core, addr);
+}
+
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready)
+ return false;
+
+ return ras_core_gpu_is_rma(ras_mgr->ras_core);
+}
+
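+/* Submit a RAS command to the RAS core. A one-page command context carries
+ * the command id, the caller's input payload and the output buffer; the
+ * result is copied back to @output only when the command succeeds and the
+ * reported output size matches @out_size exactly.
+ */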
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_cmd_ctx *cmd_ctx;
+ uint32_t ctx_buf_size = PAGE_SIZE;
+ int ret;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL);
+ if (!cmd_ctx)
+ return -ENOMEM;
+
+ cmd_ctx->cmd_id = cmd_id;
+
+ memcpy(cmd_ctx->input_buff_raw, input, input_size);
+ cmd_ctx->input_size = input_size;
+ cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx);
+
+ ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx);
+ if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size))
+ memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size);
+
+ kfree(cmd_ctx);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
new file mode 100644
index 000000000000..42f190a8feb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_MGR_H__
+#define __AMDGPU_RAS_MGR_H__
+#include "ras.h"
+#include "amdgpu_ras_process.h"
+
+enum ras_ih_type {
+ RAS_IH_NONE,
+ RAS_IH_FROM_BLOCK_CONTROLLER,
+ RAS_IH_FROM_CONSUMER_CLIENT,
+ RAS_IH_FROM_FATAL_ERROR,
+};
+
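+/* Interrupt payload handed to the RAS manager: either a raw IV entry from a
+ * block controller or the pasid/reset callback data of a consumer client.
+ */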
+struct ras_ih_info {
+ uint32_t block;
+ union {
+ struct amdgpu_iv_entry iv_entry;
+ struct {
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+ };
+ };
+};
+
+struct amdgpu_ras_mgr {
+ struct amdgpu_device *adev;
+ struct ras_core_context *ras_core;
+ struct delayed_work retire_page_dwork;
+ struct ras_event_manager ras_event_mgr;
+ uint64_t last_poison_consumption_seqno;
+ bool ras_is_ready;
+};
+
+extern const struct amdgpu_ip_block_version ras_v1_0_ip_block;
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(
+ struct amdgpu_device *adev);
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable);
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags);
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type);
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode);
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
new file mode 100644
index 000000000000..79a51b1603ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_smu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core,
+ u32 msg, u32 *count)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ u32 smu_msg;
+ int ret = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ?
+ SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ if (ret)
+ *count = 0;
+
+ return ret;
+}
+
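+/* Dump one 64-bit register of a valid ACA bank via the SMU. The register is
+ * read back as two 32-bit dwords; the message parameter encodes the bank
+ * index in its upper 16 bits and the dword-aligned byte offset in its lower
+ * bits.
+ */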
+static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t data[2] = {0, 0};
+ uint32_t param;
+ int ret = 0;
+ int i, offset;
+ u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ?
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ offset = reg_idx * 8;
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc);
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n",
+ reg_idx, offset);
+ break;
+ }
+ }
+ up_read(&adev->reset_domain->sem);
+
+ if (!ret)
+ *val = (uint64_t)data[1] << 32 | data[0];
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = {
+ .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count,
+ .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
new file mode 100644
index 000000000000..71c614ae1ae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_MP1_V13_0_H__
+#define __AMDGPU_RAS_MP1_V13_0_H__
+#include "ras.h"
+
+extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
new file mode 100644
index 000000000000..2783f5875c7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+#include "nbio/nbio_7_9_0_offset.h"
+#include "nbio/nbio_7_9_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function; the driver needs no initialization here */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known BIF ring hw bug, it has to be disabled, so the process function
+ * will never be invoked. Leave it as a dummy.
+ */
+ return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function; the driver needs no initialization here */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known BIF ring hw bug, it has to be disabled, so the process function
+ * will never be invoked. Leave it as a dummy.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+ .set = nbio_v7_9_set_ras_controller_irq_state,
+ .process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_9_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core,
+ bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_9_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = {
+ .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt,
+ .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
new file mode 100644
index 000000000000..272259e9a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RAS_NBIO_V7_9_H__
+#define __AMDGPU_RAS_NBIO_V7_9_H__
+
+extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
new file mode 100644
index 000000000000..6727fc9a2b9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_process.h"
+
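+/* Interval, in milliseconds, between runs of the bad page retirement work */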
+#define RAS_MGR_RETIRE_PAGE_INTERVAL 100
+
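+/* Delayed work that retires cached bad pages. It backs off while a GPU reset
+ * or RAS recovery is in progress and keeps rescheduling itself as long as
+ * ras_umc_handle_bad_pages() succeeds.
+ */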
+static void ras_process_retire_page_dwork(struct work_struct *work)
+{
+ struct amdgpu_ras_mgr *ras_mgr =
+ container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work);
+ struct amdgpu_device *adev = ras_mgr->adev;
+ int ret;
+
+ if (amdgpu_ras_is_rma(adev))
+ return;
+
+ /* If gpu reset is ongoing, delay retiring the bad pages */
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3));
+ return;
+ }
+
+ ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL);
+ if (!ret)
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL));
+}
+
+int amdgpu_ras_process_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork);
+
+ return 0;
+}
+
+int amdgpu_ras_process_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ /* Save all cached bad pages to eeprom */
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ cancel_delayed_work_sync(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr->ras_core)
+ return -EINVAL;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true);
+}
+
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data)
+{
+ amdgpu_ras_set_fed(adev, true);
+ return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET);
+}
+
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ struct ras_event_req req;
+ uint64_t seqno;
+
+ if (!ih_info)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.block = ih_info->block;
+ req.data = ih_info->data;
+ req.pasid = ih_info->pasid;
+ req.pasid_fn = ih_info->pasid_fn;
+ req.reset = ih_info->reset;
+
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+
+ /* When the ACA registers cannot be read from the FW, the poison
+ * consumption seqno in the fifo is never popped, so check whether
+ * this seqno is still the previous one.
+ */
+ if (seqno == ras_mgr->last_poison_consumption_seqno) {
+ /* Pop and discard the previous seqno */
+ ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+ }
+ ras_mgr->last_poison_consumption_seqno = seqno;
+ req.seqno = seqno;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false);
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
new file mode 100644
index 000000000000..b9502bd21beb
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_PROCESS_H__
+#define __AMDGPU_RAS_PROCESS_H__
+#include "ras_process.h"
+#include "amdgpu_ras_mgr.h"
+
+enum ras_ih_type;
+int amdgpu_ras_process_init(struct amdgpu_device *adev);
+int amdgpu_ras_process_fini(struct amdgpu_device *adev);
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev,
+ void *data);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
new file mode 100644
index 000000000000..f21cd55a25be
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+
+static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+ uint64_t seq_no;
+
+ ret = amdgpu_ras_global_ras_isr(adev);
+ if (ret)
+ return ret;
+
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE);
+ RAS_DEV_INFO(adev,
+ "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n",
+ seq_no);
+
+ return amdgpu_ras_process_handle_unexpected_interrupt(adev, data);
+}
+
+static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core,
+ void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_event_req *req = (struct ras_event_req *)data;
+ pasid_notify pasid_fn;
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->pasid_fn) {
+ pasid_fn = (pasid_notify)req->pasid_fn;
+ pasid_fn(adev, req->pasid, req->data);
+ }
+
+ return 0;
+}
+
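+/* Allocate a RAS event sequence number. Seqno types are mapped onto the
+ * amdgpu RAS event types (fatal, poison creation, poison consumption) and
+ * the counter is shared across an XGMI hive when one exists; while fatal
+ * error recovery is in progress the previous fatal seqno is reused.
+ */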
+static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct ras_event_state *event_state;
+ struct amdgpu_hive_info *hive;
+ enum ras_event_type event_type;
+ uint64_t seq_no;
+
+ if (!ras_mgr || !seqno ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return -EINVAL;
+
+ switch (seqno_type) {
+ case RAS_SEQNO_TYPE_UE:
+ event_type = RAS_EVENT_TYPE_FATAL;
+ break;
+ case RAS_SEQNO_TYPE_CE:
+ case RAS_SEQNO_TYPE_DE:
+ event_type = RAS_EVENT_TYPE_POISON_CREATION;
+ break;
+ case RAS_SEQNO_TYPE_POISON_CONSUMPTION:
+ event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
+ break;
+ default:
+ event_type = RAS_EVENT_TYPE_INVALID;
+ break;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+ event_state = &event_mgr->event_state[event_type];
+ if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) {
+ seq_no = event_state->last_seqno;
+ } else {
+ seq_no = atomic64_inc_return(&event_mgr->seqno);
+ event_state->last_seqno = seq_no;
+ atomic64_inc(&event_state->count);
+ }
+ amdgpu_put_xgmi_hive(hive);
+
+ *seqno = seq_no;
+ return 0;
+}
+
+static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev);
+ int ret = 0;
+
+ switch (event_id) {
+ case RAS_EVENT_ID__BAD_PAGE_DETECTED:
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ break;
+ case RAS_EVENT_ID__POISON_CONSUMPTION:
+ amdgpu_ras_sys_poison_consumption_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__RESERVE_BAD_PAGE:
+ ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data);
+ break;
+ case RAS_EVENT_ID__FATAL_ERROR_DETECTED:
+ ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM:
+ ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP:
+ ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__DEVICE_RMA:
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL);
+ ret = amdgpu_dpm_send_rma_reason(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RESET_GPU:
+ ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Invalid ras notify event:%d\n", event_id);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ return ktime_get_real_seconds();
+}
+
+static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core,
+ uint32_t *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t gpu_status = 0;
+
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
+ gpu_status |= RAS_GPU_STATUS__IN_RESET;
+
+ if (amdgpu_sriov_vf(adev))
+ gpu_status |= RAS_GPU_STATUS__IS_VF;
+
+ *status = gpu_status;
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+
+ dev_info->device_id = adev->pdev->device;
+ dev_info->vendor_id = adev->pdev->vendor;
+ dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core,
+ bool down, bool try)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret = 0;
+
+ if (down && try)
+ ret = down_read_trylock(&adev->reset_domain->sem);
+ else if (down)
+ down_read(&adev->reset_domain->sem);
+ else
+ up_read(&adev->reset_domain->sem);
+
+ return ret;
+}
+
+static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
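+/* Map the abstract RAS core memory types onto the PSP buffers already owned
+ * by the driver: the KM ring, command and fence buffers, the TA firmware
+ * region and the RAS TA shared memory.
+ */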
+static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct psp_context *psp = &adev->psp;
+ struct psp_ring *psp_ring;
+ struct ta_mem_context *mem_ctx;
+
+ if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) {
+ psp_ring = &psp->km_ring;
+ gpu_mem->mem_bo = adev->firmware.rbuf;
+ gpu_mem->mem_size = psp_ring->ring_size;
+ gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr;
+ gpu_mem->mem_cpu_addr = psp_ring->ring_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) {
+ gpu_mem->mem_bo = psp->cmd_buf_bo;
+ gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->cmd_buf_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) {
+ gpu_mem->mem_bo = psp->fence_buf_bo;
+ gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fence_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) {
+ gpu_mem->mem_bo = psp->fw_pri_bo;
+ gpu_mem->mem_size = PSP_1_MEG;
+ gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fw_pri_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) {
+ mem_ctx = &psp->ras_context.context.mem_context;
+ gpu_mem->mem_bo = mem_ctx->shared_bo;
+ gpu_mem->mem_size = mem_ctx->shared_mem_size;
+ gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr;
+ gpu_mem->mem_cpu_addr = mem_ctx->shared_buf;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!gpu_mem->mem_bo || !gpu_mem->mem_size ||
+ !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) {
+ RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ return 0;
+}
+
+const struct ras_sys_func amdgpu_ras_sys_fn = {
+ .ras_notifier = amdgpu_ras_sys_event_notifier,
+ .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp,
+ .gen_seqno = amdgpu_ras_sys_gen_seqno,
+ .check_gpu_status = amdgpu_ras_sys_check_gpu_status,
+ .get_device_system_info = amdgpu_ras_sys_get_device_system_info,
+ .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock,
+ .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt,
+ .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem,
+ .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
new file mode 100644
index 000000000000..8156531a7b63
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_SYS_H__
+#define __RAS_SYS_H__
+#include <linux/stdarg.h>
+#include <linux/printk.h>
+#include <linux/dev_printk.h>
+#include <linux/mempool.h>
+#include "amdgpu.h"
+
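+/* Device-aware logging helpers: log against the amdgpu device when one is
+ * available and fall back to plain printk() otherwise.
+ */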
+#define RAS_DEV_ERR(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_ERR fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_WARN(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_WARNING fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_INFO(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_INFO fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_DBG(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
+
+#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ 0, ip##_HWIP, inst); \
+})
+
+#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+ value, 0, ip##_HWIP, inst); \
+})
+
+/* GET_INST returns the physical instance corresponding to a logical instance */
+#define RAS_GET_INST(dev, ip, inst) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ adev->ip_map.logical_to_dev_inst ? \
+ adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \
+})
+
+#define RAS_GET_MASK(dev, ip, mask) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ (adev->ip_map.logical_to_dev_mask ? \
+ adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \
+})
+
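+/* Thin wrappers around kernel primitives that the RAS core only reaches
+ * through opaque pointers.
+ */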
+static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter)
+{
+ return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index);
+}
+
+static inline long ras_wait_event_interruptible_timeout(void *wq_head,
+ int (*condition)(void *param), void *param, unsigned int timeout)
+{
+ return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head,
+ condition(param), timeout);
+}
+
+extern const struct ras_sys_func amdgpu_ras_sys_fn;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
index e69de29bb2d1..e826a1f86424 100644
--- a/drivers/gpu/drm/amd/ras/rascore/Makefile
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
@@ -0,0 +1,44 @@
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+RAS_CORE_FILES = ras_core.o \
+ ras_mp1.o \
+ ras_mp1_v13_0.o \
+ ras_aca.o \
+ ras_aca_v1_0.o \
+ ras_eeprom.o \
+ ras_umc.o \
+ ras_umc_v12_0.o \
+ ras_cmd.o \
+ ras_gfx.o \
+ ras_gfx_v9_0.o \
+ ras_process.o \
+ ras_nbio.o \
+ ras_nbio_v7_9.o \
+ ras_log_ring.o \
+ ras_cper.o \
+ ras_psp.o \
+ ras_psp_v13_0.o
+
+RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_CORE)
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h
new file mode 100644
index 000000000000..fa224b36e3f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+#include "ras_sys.h"
+#include "ras_umc.h"
+#include "ras_aca.h"
+#include "ras_eeprom.h"
+#include "ras_core_status.h"
+#include "ras_process.h"
+#include "ras_gfx.h"
+#include "ras_cmd.h"
+#include "ras_nbio.h"
+#include "ras_mp1.h"
+#include "ras_psp.h"
+#include "ras_log_ring.h"
+
+#define RAS_HW_ERR "[Hardware Error]: "
+
+#define RAS_GPU_PAGE_SHIFT 12
+#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT)
+#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT)
+
+#define RAS_CORE_RESET_GPU 0x10000
+
+#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001)
+#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002)
+#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004)
+
+enum ras_block_id {
+ RAS_BLOCK_ID__UMC = 0,
+ RAS_BLOCK_ID__SDMA,
+ RAS_BLOCK_ID__GFX,
+ RAS_BLOCK_ID__MMHUB,
+ RAS_BLOCK_ID__ATHUB,
+ RAS_BLOCK_ID__PCIE_BIF,
+ RAS_BLOCK_ID__HDP,
+ RAS_BLOCK_ID__XGMI_WAFL,
+ RAS_BLOCK_ID__DF,
+ RAS_BLOCK_ID__SMN,
+ RAS_BLOCK_ID__SEM,
+ RAS_BLOCK_ID__MP0,
+ RAS_BLOCK_ID__MP1,
+ RAS_BLOCK_ID__FUSE,
+ RAS_BLOCK_ID__MCA,
+ RAS_BLOCK_ID__VCN,
+ RAS_BLOCK_ID__JPEG,
+ RAS_BLOCK_ID__IH,
+ RAS_BLOCK_ID__MPIO,
+
+ RAS_BLOCK_ID__LAST
+};
+
+enum ras_ecc_err_type {
+ RAS_ECC_ERR__NONE = 0,
+ RAS_ECC_ERR__PARITY = 1,
+ RAS_ECC_ERR__SINGLE_CORRECTABLE = 2,
+ RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4,
+ RAS_ECC_ERR__POISON = 8,
+};
+
+enum ras_err_type {
+ RAS_ERR_TYPE__UE = 0,
+ RAS_ERR_TYPE__CE,
+ RAS_ERR_TYPE__DE,
+ RAS_ERR_TYPE__LAST
+};
+
+enum ras_seqno_type {
+ RAS_SEQNO_TYPE_INVALID = 0,
+ RAS_SEQNO_TYPE_UE,
+ RAS_SEQNO_TYPE_CE,
+ RAS_SEQNO_TYPE_DE,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION,
+ RAS_SEQNO_TYPE_COUNT_MAX,
+};
+
+enum ras_seqno_fifo {
+ SEQNO_FIFO_INVALID = 0,
+ SEQNO_FIFO_POISON_CREATION,
+ SEQNO_FIFO_POISON_CONSUMPTION,
+ SEQNO_FIFO_COUNT_MAX
+};
+
+enum ras_notify_event {
+ RAS_EVENT_ID__NONE,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED,
+ RAS_EVENT_ID__POISON_CONSUMPTION,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE,
+ RAS_EVENT_ID__DEVICE_RMA,
+ RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED,
+ RAS_EVENT_ID__RESET_GPU,
+ RAS_EVENT_ID__RESET_VF,
+};
+
+enum ras_gpu_status {
+ RAS_GPU_STATUS__NOT_READY = 0,
+ RAS_GPU_STATUS__READY = 0x1,
+ RAS_GPU_STATUS__IN_RESET = 0x2,
+ RAS_GPU_STATUS__IS_RMA = 0x4,
+ RAS_GPU_STATUS__IS_VF = 0x8,
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+struct ras_umc;
+struct ras_aca;
+struct ras_process;
+struct ras_nbio;
+struct ras_log_ring;
+struct ras_psp;
+
+struct ras_mp1_sys_func {
+ int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core,
+ u32 msg, u32 *count);
+ int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_eeprom_sys_func {
+ int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read);
+ int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio_sys_func {
+ int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+ int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+};
+
+struct ras_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ long tm_year;
+};
+
+struct device_system_info {
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t socket_id;
+};
+
+enum gpu_mem_type {
+ GPU_MEM_TYPE_DEFAULT,
+ GPU_MEM_TYPE_RAS_PSP_RING,
+ GPU_MEM_TYPE_RAS_PSP_CMD,
+ GPU_MEM_TYPE_RAS_PSP_FENCE,
+ GPU_MEM_TYPE_RAS_TA_FW,
+ GPU_MEM_TYPE_RAS_TA_CMD,
+};
+
+struct ras_psp_sys_func {
+ int (*get_ras_psp_system_status)(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status);
+ int (*get_ras_ta_init_param)(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param);
+};
+
+struct ras_sys_func {
+ int (*gpu_reset_lock)(struct ras_core_context *ras_core,
+ bool down, bool try);
+ int (*check_gpu_status)(struct ras_core_context *ras_core,
+ uint32_t *status);
+ int (*gen_seqno)(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno);
+ int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data);
+ int (*ras_notifier)(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+ u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core);
+ int (*get_device_system_info)(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+ bool (*detect_ras_interrupt)(struct ras_core_context *ras_core);
+ int (*get_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+ int (*put_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+};
+
+struct ras_ecc_count {
+ uint64_t new_ce_count;
+ uint64_t total_ce_count;
+ uint64_t new_ue_count;
+ uint64_t total_ue_count;
+ uint64_t new_de_count;
+ uint64_t total_de_count;
+};
+
+struct ras_bank_ecc {
+ uint32_t nps;
+ uint64_t seq_no;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct ras_bank_ecc_node {
+ struct list_head node;
+ struct ras_bank_ecc ecc;
+};
+
+struct ras_aca_config {
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+};
+
+struct ras_mp1_config {
+ const struct ras_mp1_sys_func *mp1_sys_fn;
+};
+
+struct ras_nbio_config {
+ const struct ras_nbio_sys_func *nbio_sys_fn;
+};
+
+struct ras_psp_config {
+ const struct ras_psp_sys_func *psp_sys_fn;
+};
+
+struct ras_umc_config {
+ uint32_t umc_vram_type;
+};
+
+struct ras_eeprom_config {
+ const struct ras_eeprom_sys_func *eeprom_sys_fn;
+ int eeprom_record_threshold_config;
+ uint32_t eeprom_record_threshold_count;
+ void *eeprom_i2c_adapter;
+ u32 eeprom_i2c_addr;
+ u32 eeprom_i2c_port;
+ u16 max_i2c_read_len;
+ u16 max_i2c_write_len;
+};
+
+struct ras_core_config {
+ u32 aca_ip_version;
+ u32 umc_ip_version;
+ u32 mp1_ip_version;
+ u32 gfx_ip_version;
+ u32 nbio_ip_version;
+ u32 psp_ip_version;
+
+ bool poison_supported;
+ bool ras_eeprom_supported;
+ const struct ras_sys_func *sys_fn;
+
+ struct ras_aca_config aca_cfg;
+ struct ras_mp1_config mp1_cfg;
+ struct ras_nbio_config nbio_cfg;
+ struct ras_psp_config psp_cfg;
+ struct ras_eeprom_config eeprom_cfg;
+ struct ras_umc_config umc_cfg;
+};
+
+struct ras_core_context {
+ void *dev;
+ struct ras_core_config *config;
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+ int max_ue_banks_per_query;
+ int max_ce_banks_per_query;
+ struct ras_aca ras_aca;
+
+ bool ras_eeprom_supported;
+ struct ras_eeprom_control ras_eeprom;
+
+ struct ras_psp ras_psp;
+ struct ras_umc ras_umc;
+ struct ras_nbio ras_nbio;
+ struct ras_gfx ras_gfx;
+ struct ras_mp1 ras_mp1;
+ struct ras_process ras_proc;
+ struct ras_cmd_mgr ras_cmd;
+ struct ras_log_ring ras_log_ring;
+
+ const struct ras_sys_func *sys_fn;
+
+ /* is poison mode supported */
+ bool poison_supported;
+
+ bool is_rma;
+ bool is_initialized;
+
+ struct kfifo de_seqno_fifo;
+ struct kfifo consumption_seqno_fifo;
+ spinlock_t seqno_lock;
+
+ bool ras_core_enabled;
+};
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config);
+void ras_core_destroy(struct ras_core_context *ras_core);
+int ras_core_sw_init(struct ras_core_context *ras_core);
+int ras_core_sw_fini(struct ras_core_context *ras_core);
+int ras_core_hw_init(struct ras_core_context *ras_core);
+int ras_core_hw_fini(struct ras_core_context *ras_core);
+bool ras_core_is_ready(struct ras_core_context *ras_core);
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type);
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop);
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno);
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core);
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count);
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core);
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data);
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core);
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core);
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id);
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm);
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable);
+bool ras_core_is_enabled(struct ras_core_context *ras_core);
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core);
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core);
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core);
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core);
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
new file mode 100644
index 000000000000..e433c70d2989
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_aca_v1_0.h"
+#include "ras_mp1_v13_0.h"
+
+#define ACA_MARK_FATAL_FLAG 0x100
+#define ACA_MARK_UE_READ_FLAG 0x1
+
+#define blk_name(block_id) ras_core_get_ras_block_name(block_id)
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX__CTL},
+ {"STATUS", ACA_REG_IDX__STATUS},
+ {"ADDR", ACA_REG_IDX__ADDR},
+ {"MISC", ACA_REG_IDX__MISC0},
+ {"CONFIG", ACA_REG_IDX__CONFG},
+ {"IPID", ACA_REG_IDX__IPID},
+ {"SYND", ACA_REG_IDX__SYND},
+ {"DESTAT", ACA_REG_IDX__DESTAT},
+ {"DEADDR", ACA_REG_IDX__DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK},
+};
+
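+/* Print a per-socket/die summary of the new and total UE/DE/CE counts for a
+ * block; for GFX the totals are accumulated across all XCDs on the die.
+ */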
+static void aca_report_ecc_info(struct ras_core_context *ras_core,
+ u64 seq_no, u32 blk, u32 skt, u32 aid,
+ struct aca_aid_ecc *aid_ecc,
+ struct aca_bank_ecc *new_ecc)
+{
+ struct aca_ecc_count ecc_count = {0};
+
+ ecc_count.new_ue_count = new_ecc->ue_count;
+ ecc_count.new_de_count = new_ecc->de_count;
+ ecc_count.new_ce_count = new_ecc->ce_count;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ struct aca_ecc_count *xcd_ecc;
+ int xcd_id;
+
+ for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) {
+ xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err;
+ ecc_count.total_ue_count += xcd_ecc->total_ue_count;
+ ecc_count.total_de_count += xcd_ecc->total_de_count;
+ ecc_count.total_ce_count += xcd_ecc->total_ce_count;
+ }
+ } else {
+ ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count;
+ ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count;
+ ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count;
+ }
+
+ if (ecc_count.new_ue_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk));
+ }
+
+ if (ecc_count.new_de_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ }
+
+ if (ecc_count.new_ce_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk));
+ }
+}
+
+static void aca_bank_log(struct ras_core_context *ras_core,
+ int idx, int total, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ int i;
+
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n",
+ bank->seq_no);
+ /* idx is printed 1-based in the output format, e.g. ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ bank->seq_no, idx + 1, total,
+ aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static void aca_log_bank_data(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc,
+ struct ras_log_batch_tag *batch)
+{
+ if (bank_ecc->ue_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch);
+ else if (bank_ecc->de_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch);
+ else
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch);
+}
+
+static int aca_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ return ras_mp1_get_bank_count(ras_core, type, count);
+}
+
+static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank)
+{
+ const struct aca_bank_hw_ops *bank_ops;
+
+ if (!aca_blk->blk_info)
+ return false;
+
+ bank_ops = &aca_blk->blk_info->bank_ops;
+ if (!bank_ops->bank_match)
+ return false;
+
+ return bank_ops->bank_match(aca_blk, bank);
+}
+
+static int aca_parse_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk,
+ struct aca_bank_reg *bank,
+ struct aca_bank_ecc *ecc)
+{
+ const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops;
+
+ if (!bank_ops || !bank_ops->bank_parse)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc);
+}
+
+static int aca_check_block_ecc_info(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_ecc_info *info)
+{
+ if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Socket id (%d) is out of config! max:%u\n",
+ info->socket_id, aca_blk->ecc.socket_num_per_hive);
+ return -ENODATA;
+ }
+
+ if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Die id (%d) is out of config! max:%u\n",
+ info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num);
+ return -ENODATA;
+ }
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) &&
+ (info->xcd_id >=
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Xcd id (%d) is out of config! max:%u\n",
+ info->xcd_id,
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int aca_log_bad_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ struct aca_ecc_info *info;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+ int ret;
+
+ info = &bank_ecc->bank_info;
+
+ ret = aca_check_block_ecc_info(ras_core, aca_blk, info);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id];
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)
+ ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err;
+ else
+ ecc_err = &aid_ecc->ecc_err;
+
+ ecc_err->new_ce_count += bank_ecc->ce_count;
+ ecc_err->total_ce_count += bank_ecc->ce_count;
+ ecc_err->new_ue_count += bank_ecc->ue_count;
+ ecc_err->total_ue_count += bank_ecc->ue_count;
+ ecc_err->new_de_count += bank_ecc->de_count;
+ ecc_err->total_de_count += bank_ecc->de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) &&
+ bank_ecc->de_count) {
+ struct ras_bank_ecc ras_ecc = {0};
+
+ ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core);
+ ras_ecc.addr = bank_ecc->bank_info.addr;
+ ras_ecc.ipid = bank_ecc->bank_info.ipid;
+ ras_ecc.status = bank_ecc->bank_info.status;
+ ras_ecc.seq_no = bank->seq_no;
+
+ if (ras_core_gpu_in_reset(ras_core))
+ ras_umc_log_bad_bank_pending(ras_core, &ras_ecc);
+ else
+ ras_umc_log_bad_bank(ras_core, &ras_ecc);
+ }
+
+ aca_report_ecc_info(ras_core,
+ bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id,
+ &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc);
+
+ return 0;
+}
+
+static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank)
+{
+ int i = 0;
+
+ for (i = 0; i < RAS_BLOCK_ID__LAST; i++)
+ if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank))
+ return &ras_core->ras_aca.aca_blk[i];
+
+ return NULL;
+}
+
+static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type,
+ int idx, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ int i, ret, reg_cnt;
+
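+ /* A bank exposes at most ACA_REG_MAX_COUNT (16) registers; each one is read through the MP1 interface */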
+ reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < reg_cnt; i++) {
+ ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core,
+ enum ras_err_type err_type, struct aca_block *aca_blk,
+ struct aca_bank_ecc *bank_ecc)
+{
+ uint64_t seq_no = 0;
+
+ if (bank_ecc->de_count) {
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC)
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true);
+ else
+ seq_no = ras_core_get_seqno(ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ } else if (bank_ecc->ue_count) {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true);
+ } else {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true);
+ }
+
+ return seq_no;
+}
+
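+/*
+ * After a fatal error has been flagged, UE banks are harvested only once;
+ * return true when this UE update is a duplicate that can be skipped.
+ */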
+static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core,
+ u32 ecc_type)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (ecc_type != RAS_ERR_TYPE__UE)
+ return false;
+
+ if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) {
+ if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)
+ return true;
+
+ aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG;
+ }
+
+ return false;
+}
+
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG;
+}
+
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) &&
+ (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG))
+ aca->ue_updated_mark = 0;
+}
+
+static int aca_banks_update(struct ras_core_context *ras_core,
+ u32 ecc_type, void *data)
+{
+ struct aca_bank_reg bank;
+ struct aca_block *aca_blk;
+ struct aca_bank_ecc bank_ecc;
+ struct ras_log_batch_tag *batch_tag = NULL;
+ u32 count = 0;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ras_core->ras_aca.bank_op_lock);
+
+ if (aca_dup_update_ue_in_fatal(ras_core, ecc_type))
+ goto out;
+
+ ret = aca_get_bank_count(ras_core, ecc_type, &count);
+ if (ret)
+ goto out;
+
+ if (!count)
+ goto out;
+
+ batch_tag = ras_log_ring_create_batch_tag(ras_core);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = aca_dump_bank(ras_core, ecc_type, i, &bank);
+ if (ret)
+ break;
+
+ bank.ecc_type = ecc_type;
+
+ memset(&bank_ecc, 0, sizeof(bank_ecc));
+ aca_blk = aca_get_bank_aca_block(ras_core, &bank);
+ if (aca_blk)
+ ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc);
+
+ aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag);
+ aca_bank_log(ras_core, i, count, &bank, &bank_ecc);
+
+ if (!ret && aca_blk)
+ ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ if (ret)
+ break;
+ }
+ ras_log_ring_destroy_batch_tag(ras_core, batch_tag);
+
+out:
+ mutex_unlock(&ras_core->ras_aca.bank_op_lock);
+ return ret;
+}
+
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data)
+{
+ /* Update aca bank to aca source error_cache first */
+ return aca_banks_update(ras_core, type, data);
+}
+
+static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk)
+{
+ return &ras_core->ras_aca.aca_blk[blk];
+}
+
+static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ struct aca_aid_ecc *aid_ecc;
+ int skt, aid, xcd;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++)
+ memset(&aid_ecc->xcd.xcd[xcd],
+ 0, sizeof(struct aca_xcd_ecc));
+ } else {
+ memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err));
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core)
+{
+ enum ras_block_id blk;
+ int ret;
+
+ for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) {
+ ret = ras_aca_clear_block_ecc_count(ras_core, blk);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) {
+ ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ } else {
+ ecc_err = &aid_ecc->ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, u32 skt, u32 aid, u32 xcd,
+ struct aca_ecc_count *ecc_count)
+{
+ struct aca_block *aca_blk;
+ struct aca_ecc_count *ecc_err;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ if (blk == RAS_BLOCK_ID__GFX)
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err;
+ else
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err;
+
+ ecc_count->new_ce_count = ecc_err->new_ce_count;
+ ecc_count->total_ce_count = ecc_err->total_ce_count;
+ ecc_count->new_ue_count = ecc_err->new_ue_count;
+ ecc_count->total_ue_count = ecc_err->total_ue_count;
+ ecc_count->new_de_count = ecc_err->new_de_count;
+ ecc_count->total_de_count = ecc_err->total_de_count;
+
+ return 0;
+}
+
+static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src)
+{
+ des->new_ce_count += src->new_ce_count;
+ des->total_ce_count += src->total_ce_count;
+ des->new_ue_count += src->new_ue_count;
+ des->total_ue_count += src->total_ue_count;
+ des->new_de_count += src->new_de_count;
+ des->total_de_count += src->total_de_count;
+}
+
+static const struct ras_aca_ip_func *aca_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(1, 0, 0):
+ return &ras_aca_func_v1_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "ACA ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, void *data)
+{
+ struct ras_ecc_count *err_data = (struct ras_ecc_count *)data;
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count ecc_xcd;
+ struct aca_ecc_count ecc_aid;
+ struct aca_ecc_count ecc;
+
+ if (blk >= RAS_BLOCK_ID__LAST)
+ return -EINVAL;
+
+ if (!err_data)
+ return -EINVAL;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ memset(&ecc, 0, sizeof(ecc));
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ for (xcd = 0;
+ xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num;
+ xcd++) {
+ memset(&ecc_xcd, 0, sizeof(ecc_xcd));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, xcd, &ecc_xcd))
+ continue;
+ _add_ecc_count(&ecc_aid, &ecc_xcd);
+ }
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ } else {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, 0, &ecc_aid))
+ continue;
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ }
+
+ err_data->new_ce_count = ecc.new_ce_count;
+ err_data->total_ce_count = ecc.total_ce_count;
+ err_data->new_ue_count = ecc.new_ue_count;
+ err_data->total_ue_count = ecc.total_ue_count;
+ err_data->new_de_count = ecc.new_de_count;
+ err_data->total_de_count = ecc.total_de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg;
+ struct aca_block *aca_blk;
+ uint32_t socket_num_per_hive;
+ uint32_t aid_num_per_socket;
+ uint32_t xcd_num_per_aid;
+ int blk, skt, aid;
+
+ socket_num_per_hive = aca_cfg->socket_num_per_hive;
+ aid_num_per_socket = aca_cfg->aid_num_per_socket;
+ xcd_num_per_aid = aca_cfg->xcd_num_per_aid;
+
+ if (!xcd_num_per_aid || !aid_num_per_socket ||
+ (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) ||
+ (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) ||
+ (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n",
+ socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid);
+ return -EINVAL;
+ }
+
+ memset(ras_aca, 0, sizeof(*ras_aca));
+
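+ /* Pre-populate the per-block ECC topology: sockets per hive, AIDs per socket and, for GFX, XCDs per AID */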
+ for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) {
+ aca_blk = &ras_aca->aca_blk[blk];
+ aca_blk->ecc.socket_num_per_hive = socket_num_per_hive;
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++)
+ aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num =
+ xcd_num_per_aid;
+ }
+ }
+ }
+
+ mutex_init(&ras_aca->aca_lock);
+ mutex_init(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ mutex_destroy(&ras_aca->aca_lock);
+ mutex_destroy(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct aca_block *aca_blk;
+ const struct ras_aca_ip_func *ip_func;
+ int i;
+
+ ras_aca->aca_ip_version = ras_core->config->aca_ip_version;
+ ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version);
+ if (!ip_func)
+ return -EINVAL;
+
+ for (i = 0; i < ip_func->block_num; i++) {
+ aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id];
+ aca_blk->blk_info = ip_func->block_info[i];
+ }
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
+
+int ras_aca_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
new file mode 100644
index 000000000000..f61b02a5f0fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_H__
+#define __RAS_ACA_H__
+#include "ras.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+#define MAX_ACA_RAS_BLOCK 20
+
+#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE)
+#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE)
+#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE)
+
+enum ras_aca_reg_idx {
+ ACA_REG_IDX__CTL = 0,
+ ACA_REG_IDX__STATUS = 1,
+ ACA_REG_IDX__ADDR = 2,
+ ACA_REG_IDX__MISC0 = 3,
+ ACA_REG_IDX__CONFG = 4,
+ ACA_REG_IDX__IPID = 5,
+ ACA_REG_IDX__SYND = 6,
+ ACA_REG_IDX__DESTAT = 8,
+ ACA_REG_IDX__DEADDR = 9,
+ ACA_REG_IDX__CTL_MASK = 10,
+ ACA_REG_MAX_COUNT = 16,
+};
+
+struct ras_core_context;
+struct aca_block;
+
+struct aca_bank_reg {
+ u32 ecc_type;
+ u64 seq_no;
+ u64 regs[ACA_REG_MAX_COUNT];
+};
+
+enum aca_ecc_hwip {
+ ACA_ECC_HWIP__UNKNOWN = -1,
+ ACA_ECC_HWIP__PSP = 0,
+ ACA_ECC_HWIP__UMC,
+ ACA_ECC_HWIP__SMU,
+ ACA_ECC_HWIP__PCS_XGMI,
+ ACA_ECC_HWIP_COUNT,
+};
+
+struct aca_ecc_info {
+ int die_id;
+ int socket_id;
+ int xcd_id;
+ int hwid;
+ int mcatype;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct aca_bank_ecc {
+ struct aca_ecc_info bank_info;
+ u32 ce_count;
+ u32 ue_count;
+ u32 de_count;
+};
+
+struct aca_ecc_count {
+ u32 new_ce_count;
+ u32 total_ce_count;
+ u32 new_ue_count;
+ u32 total_ue_count;
+ u32 new_de_count;
+ u32 total_de_count;
+};
+
+struct aca_xcd_ecc {
+ struct aca_ecc_count ecc_err;
+};
+
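+/*
+ * Per-AID error counters: GFX tracks counters per XCD, all other blocks
+ * keep a single aggregate counter, hence the union.
+ */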
+struct aca_aid_ecc {
+ union {
+ struct aca_xcd {
+ struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID];
+ u32 xcd_num;
+ } xcd;
+ struct aca_ecc_count ecc_err;
+ };
+};
+
+struct aca_socket_ecc {
+ struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET];
+ u32 aid_num;
+};
+
+struct aca_block_ecc {
+ struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE];
+ u32 socket_num_per_hive;
+};
+
+struct aca_bank_hw_ops {
+ bool (*bank_match)(struct aca_block *ras_blk, void *data);
+ int (*bank_parse)(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, void *data, void *buf);
+};
+
+struct aca_block_info {
+ char name[32];
+ u32 ras_block_id;
+ enum aca_ecc_hwip hwip;
+ struct aca_bank_hw_ops bank_ops;
+ u32 mask;
+};
+
+struct aca_block {
+ const struct aca_block_info *blk_info;
+ struct aca_block_ecc ecc;
+};
+
+struct ras_aca_ip_func {
+ uint32_t block_num;
+ const struct aca_block_info **block_info;
+};
+
+struct ras_aca {
+ uint32_t aca_ip_version;
+ const struct ras_aca_ip_func *ip_func;
+ struct mutex aca_lock;
+ struct mutex bank_op_lock;
+ struct aca_block aca_blk[MAX_ACA_RAS_BLOCK];
+ uint32_t ue_updated_mark;
+};
+
+int ras_aca_sw_init(struct ras_core_context *ras_core);
+int ras_aca_sw_fini(struct ras_core_context *ras_core);
+int ras_aca_hw_init(struct ras_core_context *ras_core);
+int ras_aca_hw_fini(struct ras_core_context *ras_core);
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data);
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk);
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core);
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data);
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core);
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
new file mode 100644
index 000000000000..29df98948703
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_core_status.h"
+#include "ras_aca_v1_0.h"
+
+struct ras_aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
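+/* A bank is attributed to an IP by the (hardware id, MCA type) pair decoded from its IPID register */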
+static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = {
+ [ACA_ECC_HWIP__SMU] = {0x01, 0x01},
+ [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00},
+ [ACA_ECC_HWIP__UMC] = {0x96, 0x00},
+};
+
+static int aca_decode_bank_info(struct aca_block *aca_blk,
+ struct aca_bank_reg *bank, struct aca_ecc_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ info->hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ info->mcatype = ACA_REG_IPID_MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceIdLo[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid);
+ instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
+
+ if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) &&
+ (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX))
+ info->xcd_id =
+ ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 0 : 1;
+
+ return 0;
+}
+
+static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type)
+{
+ struct ras_aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || (type == ACA_ECC_HWIP__UNKNOWN))
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ mcatype = ACA_REG_IPID_MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_match_bank_default(struct aca_block *aca_blk, void *data)
+{
+ return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip);
+}
+
+static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ u32 instlo;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* CODE_SDMA0 - CODE_SDMA4, defined in the SMU driver_if header file */
+ static int sdma_err_codes[] = { 33, 34, 35, 36 };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check SDMA error codes */
+ for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) {
+ if (errcode == sdma_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* error codes defined in the SMU driver_if header file */
+ const int mmhub_err_codes[] = {
+ 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+ 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+ };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check MMHUB error codes */
+ for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) {
+ if (errcode == mmhub_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ return (ras_core->poison_supported &&
+ ACA_REG_STATUS_VAL(mc_umc_status) &&
+ ACA_REG_STATUS_DEFERRED(mc_umc_status));
+}
+
+static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_PCC(mc_umc_status) ||
+ ACA_REG_STATUS_UC(mc_umc_status) ||
+ ACA_REG_STATUS_TCC(mc_umc_status)));
+}
+
+static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_CECC(mc_umc_status) ||
+ (ACA_REG_STATUS_UECC(mc_umc_status) &&
+ ACA_REG_STATUS_UC(mc_umc_status) == 0) ||
+ /* Identify data parity error in replay mode */
+ ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 ||
+ ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) &&
+ !(aca_check_umc_ue(ras_core, mc_umc_status)))));
+}
+
+static int aca_parse_umc_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk, void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ uint32_t ext_error_code;
+ uint64_t status0;
+
+ status0 = bank->regs[ACA_REG_IDX__STATUS];
+ if (!ACA_REG_STATUS_VAL(status0))
+ return 0;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0);
+
+ if (aca_check_umc_de(ras_core, status0))
+ ecc->de_count = 1;
+ else if (aca_check_umc_ue(ras_core, status0))
+ ecc->ue_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ else if (aca_check_umc_ce(ras_core, status0))
+ ecc->ce_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+
+ return 0;
+}
+
+static bool aca_check_bank_is_de(struct ras_core_context *ras_core,
+ uint64_t status)
+{
+ return (ACA_REG_STATUS_POISON(status) ||
+ ACA_REG_STATUS_DEFERRED(status));
+}
+
+static int aca_parse_bank_default(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 misc0 = bank->regs[ACA_REG_IDX__MISC0];
+ u64 status = bank->regs[ACA_REG_IDX__STATUS];
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = status;
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ if (aca_check_bank_is_de(ras_core, status)) {
+ ecc->de_count = 1;
+ } else {
+ if (bank->ecc_type == RAS_ERR_TYPE__UE)
+ ecc->ue_count = 1;
+ else if (bank->ecc_type == RAS_ERR_TYPE__CE)
+ ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static int aca_parse_xgmi_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 status, count;
+ int ext_error_code;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ status = bank->regs[ACA_REG_IDX__STATUS];
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status);
+
+ count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ if (bank->ecc_type == RAS_ERR_TYPE__UE) {
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+ ecc->ue_count = count;
+ } else if (bank->ecc_type == RAS_ERR_TYPE__CE) {
+ count = ext_error_code == 6 ? count : 0ULL;
+ ecc->ce_count = count;
+ }
+
+ return 0;
+}
+
+static const struct aca_block_info aca_v1_0_umc = {
+ .name = "umc",
+ .ras_block_id = RAS_BLOCK_ID__UMC,
+ .hwip = ACA_ECC_HWIP__UMC,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_umc_bank,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_gfx = {
+ .name = "gfx",
+ .ras_block_id = RAS_BLOCK_ID__GFX,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_gfx_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_sdma = {
+ .name = "sdma",
+ .ras_block_id = RAS_BLOCK_ID__SDMA,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_sdma_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_mmhub = {
+ .name = "mmhub",
+ .ras_block_id = RAS_BLOCK_ID__MMHUB,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_mmhub_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_xgmi = {
+ .name = "xgmi",
+ .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL,
+ .hwip = ACA_ECC_HWIP__PCS_XGMI,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_xgmi_bank,
+ },
+};
+
+static const struct aca_block_info *aca_block_info_v1_0[] = {
+ &aca_v1_0_umc,
+ &aca_v1_0_gfx,
+ &aca_v1_0_sdma,
+ &aca_v1_0_mmhub,
+ &aca_v1_0_xgmi,
+};
+
+const struct ras_aca_ip_func ras_aca_func_v1_0 = {
+ .block_num = ARRAY_SIZE(aca_block_info_v1_0),
+ .block_info = aca_block_info_v1_0,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
new file mode 100644
index 000000000000..40e5d94b037f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_V1_0_H__
+#define __RAS_ACA_V1_0_H__
+#include "ras.h"
+
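+/* Extract bit field [h:l] from a 64-bit ACA register value */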
+#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62)
+#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61)
+#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60)
+#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59)
+#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58)
+#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57)
+#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56)
+#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55)
+#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53)
+#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46)
+#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45)
+#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44)
+#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43)
+#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40)
+#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32)
+#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24)
+#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16)
+#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0)
+
+#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48)
+#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44)
+#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32)
+#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0)
+
+#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48)
+#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32)
+
+#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the smu header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+
+extern const struct ras_aca_ip_func ras_aca_func_v1_0;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
new file mode 100644
index 000000000000..94e6d7420d94
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_cmd.h"
+
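+/*
+ * Command interface version, packed to match the ras_cmd_ctx version
+ * bitfield: minor version in bits [9:0], major version in bits [15:10].
+ */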
+#define RAS_CMD_MAJOR_VERSION 6
+#define RAS_CMD_MINOR_VERSION 0
+#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION))
+
+static int ras_cmd_add_device(struct ras_core_context *ras_core)
+{
+ INIT_LIST_HEAD(&ras_core->ras_cmd.head);
+ ras_core->ras_cmd.ras_core = ras_core;
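+ /*
+ * Handle placed in command requests: the context pointer XOR'ed with a
+ * magic value, presumably so a raw kernel pointer is never handed out as-is.
+ */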
+ ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC;
+ return 0;
+}
+
+static int ras_cmd_remove_device(struct ras_core_context *ras_core)
+{
+ memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd));
+ return 0;
+}
+
+static int ras_get_block_ecc_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_block_ecc_info_req *input_data =
+ (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_block_ecc_info_rsp *output_data =
+ (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
+ struct ras_ecc_count err_data;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ memset(&err_data, 0, sizeof(err_data));
+ ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->ce_count = err_data.total_ce_count;
+ output_data->ue_count = err_data.total_ue_count;
+ output_data->de_count = err_data.total_de_count;
+
+ cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record,
+ struct eeprom_umc_record *record)
+{
+ ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn;
+ ras_cmd_record->ts = record->ts;
+ ras_cmd_record->err_type = record->err_type;
+ ras_cmd_record->mem_channel = record->mem_channel;
+ ras_cmd_record->mcumc_id = record->mcumc_id;
+ ras_cmd_record->address = record->address;
+ ras_cmd_record->bank = record->bank;
+ ras_cmd_record->valid = 1;
+}
+
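+/*
+ * Bad pages are reported in groups of RAS_CMD_MAX_BAD_PAGES_PER_GROUP
+ * records; group_index selects which window of the retired-page table is
+ * copied into the response.
+ */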
+static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core,
+ uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data)
+{
+ struct eeprom_umc_record record;
+ struct ras_cmd_bad_page_record *ras_cmd_record;
+ uint32_t i = 0, bp_cnt = 0, group_cnt = 0;
+
+ output_data->bp_in_group = 0;
+ output_data->group_index = 0;
+
+ bp_cnt = ras_umc_get_badpage_count(ras_core);
+ if (bp_cnt) {
+ output_data->group_index = group_index;
+ group_cnt = bp_cnt / RAS_CMD_MAX_BAD_PAGES_PER_GROUP
+ + ((bp_cnt % RAS_CMD_MAX_BAD_PAGES_PER_GROUP) ? 1 : 0);
+
+ if (group_index >= group_cnt)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (;
+ i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ i++) {
+ if (ras_umc_get_badpage_record(ras_core, i, &record))
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+
+ memset(ras_cmd_record, 0, sizeof(*ras_cmd_record));
+ ras_cmd_update_bad_page_info(ras_cmd_record, &record);
+ output_data->bp_in_group++;
+ }
+ }
+ output_data->bp_total_cnt = bp_cnt;
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_bad_pages_info_req *input_data =
+ (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_bad_pages_info_rsp *output_data =
+ (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_eeprom_reset_table(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clean_badpage_data(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_aca_clear_all_blocks_ecc_count(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clear_logged_ecc(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_snapshot_rsp *output_data =
+ (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ output_data->total_cper_num = overview.logged_batch_count;
+ output_data->start_cper_id = overview.first_batch_id;
+ output_data->latest_cper_id = overview.last_batch_id;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_record_req *req =
+ (struct ras_cmd_cper_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_cper_record_rsp *rsp =
+ (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_batch_overview overview;
+ uint32_t offset = 0, real_data_len = 0;
+ uint64_t batch_id;
+ uint8_t *buffer;
+ int ret = 0, i, count;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (!req->buf_size || !req->buf_ptr || !req->cper_num)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ buffer = kzalloc(req->buf_size, GFP_KERNEL);
+ if (!buffer)
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ for (i = 0; i < req->cper_num; i++) {
+ batch_id = req->cper_start_id + i;
+ if (batch_id >= overview.last_batch_id)
+ break;
+
+ count = ras_log_ring_get_batch_records(ras_core, batch_id, trace,
+ ARRAY_SIZE(trace));
+ if (count > 0) {
+ ret = ras_cper_generate_cper(ras_core, trace, count,
+ &buffer[offset], req->buf_size - offset, &real_data_len);
+ if (ret)
+ break;
+
+ offset += real_data_len;
+ }
+ }
+
+ if ((ret && (ret != -ENOMEM)) ||
+ copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) {
+ kfree(buffer);
+ return RAS_CMD__ERROR_GENERIC;
+ }
+
+ rsp->real_data_size = offset;
+ rsp->real_cper_num = i;
+ rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp);
+
+ kfree(buffer);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_snapshot_rsp *rsp =
+ (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ rsp->total_batch_num = overview.logged_batch_count;
+ rsp->start_batch_id = overview.first_batch_id;
+ rsp->latest_batch_id = overview.last_batch_id;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_record_req *input_data =
+ (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_batch_trace_record_rsp *output_data =
+ (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+ struct ras_log_info *trace_arry[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_info *record;
+ int i, j, count = 0, offset = 0;
+ uint64_t id;
+ bool completed = false;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ if ((input_data->start_batch_id < overview.first_batch_id) ||
+ (input_data->start_batch_id >= overview.last_batch_id))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ for (i = 0; i < input_data->batch_num; i++) {
+ id = input_data->start_batch_id + i;
+ if (id >= overview.last_batch_id) {
+ completed = true;
+ break;
+ }
+
+ count = ras_log_ring_get_batch_records(ras_core,
+ id, trace_arry, ARRAY_SIZE(trace_arry));
+ if (count > 0) {
+ if ((offset + count) > RAS_CMD_MAX_TRACE_NUM)
+ break;
+ for (j = 0; j < count; j++) {
+ record = &output_data->records[offset + j];
+ record->seqno = trace_arry[j]->seqno;
+ record->timestamp = trace_arry[j]->timestamp;
+ record->event = trace_arry[j]->event;
+ memcpy(&record->aca_reg,
+ &trace_arry[j]->aca_reg, sizeof(trace_arry[j]->aca_reg));
+ }
+ } else {
+ count = 0;
+ }
+
+ output_data->batchs[i].batch_id = id;
+ output_data->batchs[i].offset = offset;
+ output_data->batchs[i].trace_num = count;
+ offset += count;
+ }
+
+ output_data->start_batch_id = input_data->start_batch_id;
+ output_data->real_batch_num = i;
+ output_data->remain_num = completed ? 0 : (input_data->batch_num - i);
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block)
+{
+ switch (block) {
+ case RAS_BLOCK_ID__UMC:
+ return RAS_TA_BLOCK__UMC;
+ case RAS_BLOCK_ID__SDMA:
+ return RAS_TA_BLOCK__SDMA;
+ case RAS_BLOCK_ID__GFX:
+ return RAS_TA_BLOCK__GFX;
+ case RAS_BLOCK_ID__MMHUB:
+ return RAS_TA_BLOCK__MMHUB;
+ case RAS_BLOCK_ID__ATHUB:
+ return RAS_TA_BLOCK__ATHUB;
+ case RAS_BLOCK_ID__PCIE_BIF:
+ return RAS_TA_BLOCK__PCIE_BIF;
+ case RAS_BLOCK_ID__HDP:
+ return RAS_TA_BLOCK__HDP;
+ case RAS_BLOCK_ID__XGMI_WAFL:
+ return RAS_TA_BLOCK__XGMI_WAFL;
+ case RAS_BLOCK_ID__DF:
+ return RAS_TA_BLOCK__DF;
+ case RAS_BLOCK_ID__SMN:
+ return RAS_TA_BLOCK__SMN;
+ case RAS_BLOCK_ID__SEM:
+ return RAS_TA_BLOCK__SEM;
+ case RAS_BLOCK_ID__MP0:
+ return RAS_TA_BLOCK__MP0;
+ case RAS_BLOCK_ID__MP1:
+ return RAS_TA_BLOCK__MP1;
+ case RAS_BLOCK_ID__FUSE:
+ return RAS_TA_BLOCK__FUSE;
+ case RAS_BLOCK_ID__MCA:
+ return RAS_TA_BLOCK__MCA;
+ case RAS_BLOCK_ID__VCN:
+ return RAS_TA_BLOCK__VCN;
+ case RAS_BLOCK_ID__JPEG:
+ return RAS_TA_BLOCK__JPEG;
+ default:
+ return RAS_TA_BLOCK__UMC;
+ }
+}
+
+static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error)
+{
+ switch (error) {
+ case RAS_ECC_ERR__NONE:
+ return RAS_TA_ERROR__NONE;
+ case RAS_ECC_ERR__PARITY:
+ return RAS_TA_ERROR__PARITY;
+ case RAS_ECC_ERR__SINGLE_CORRECTABLE:
+ return RAS_TA_ERROR__SINGLE_CORRECTABLE;
+ case RAS_ECC_ERR__MULTI_UNCORRECTABLE:
+ return RAS_TA_ERROR__MULTI_UNCORRECTABLE;
+ case RAS_ECC_ERR__POISON:
+ return RAS_TA_ERROR__POISON;
+ default:
+ return RAS_TA_ERROR__NONE;
+ }
+}
+
+static int ras_cmd_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ struct ras_cmd_inject_error_rsp *output_data =
+ (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw;
+ int ret = 0;
+ struct ras_ta_trigger_error_input block_info = {
+ .block_id = __get_ras_ta_block(req->block_id),
+ .sub_block_index = req->subblock_id,
+ .inject_error_type = __get_ras_ta_err_type(req->error_type),
+ .address = req->address,
+ .value = req->method,
+ };
+
+ ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask);
+ if (!ret) {
+ output_data->version = 0;
+ output_data->address = block_info.address;
+ cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static struct ras_cmd_func_map ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error},
+ {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info},
+ {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages},
+ {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info},
+ {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts},
+ {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot},
+ {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records},
+ {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot},
+ {RAS_CMD__GET_BATCH_TRACE_RECORD, ras_cmd_get_batch_trace_records},
+};
+
+int rascore_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) {
+ if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (!ras_cmd)
+ return RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return ras_cmd->func(ras_core, cmd, data);
+}
+
+int ras_cmd_init(struct ras_core_context *ras_core)
+{
+ return ras_cmd_add_device(ras_core);
+}
+
+int ras_cmd_fini(struct ras_core_context *ras_core)
+{
+ ras_cmd_remove_device(ras_core);
+ return 0;
+}
+
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp)
+{
+ rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION;
+ rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION;
+
+ return 0;
+}
+
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr)
+{
+ struct umc_bank_addr umc_bank = {0};
+ int ret;
+
+ ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ bank_addr->stack_id = umc_bank.stack_id;
+ bank_addr->bank_group = umc_bank.bank_group;
+ bank_addr->bank = umc_bank.bank;
+ bank_addr->row = umc_bank.row;
+ bank_addr->column = umc_bank.column;
+ bank_addr->channel = umc_bank.channel;
+ bank_addr->subchannel = umc_bank.subchannel;
+
+ return 0;
+}
+
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa)
+{
+ struct umc_bank_addr umc_bank = {0};
+
+ umc_bank.stack_id = bank_addr.stack_id;
+ umc_bank.bank_group = bank_addr.bank_group;
+ umc_bank.bank = bank_addr.bank;
+ umc_bank.row = bank_addr.row;
+ umc_bank.column = bank_addr.column;
+ umc_bank.channel = bank_addr.channel;
+ umc_bank.subchannel = bank_addr.subchannel;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true);
+}
+
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_cmd.dev_handle;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
new file mode 100644
index 000000000000..48a0715eb821
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CMD_H__
+#define __RAS_CMD_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL
+
+#define RAS_CMD_MAX_IN_SIZE 256
+#define RAS_CMD_MAX_GPU_NUM 32
+#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32
+
+/* position of instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub block index uses the lower 12 bits
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+enum ras_cmd_interface_type {
+ RAS_CMD_INTERFACE_TYPE_NONE,
+ RAS_CMD_INTERFACE_TYPE_AMDGPU,
+ RAS_CMD_INTERFACE_TYPE_VF,
+ RAS_CMD_INTERFACE_TYPE_PF,
+};
+
+enum ras_cmd_id_range {
+ RAS_CMD_ID_COMMON_START = 0,
+ RAS_CMD_ID_COMMON_END = 0x10000,
+ RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END,
+ RAS_CMD_ID_AMDGPU_END = 0x20000,
+ RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END,
+ RAS_CMD_ID_MXGPU_END = 0x30000,
+ RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END,
+ RAS_CMD_ID_MXGPU_VF_END = 0x40000,
+};
+
+enum ras_cmd_id {
+ RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START,
+ RAS_CMD__QUERY_INTERFACE_INFO,
+ RAS_CMD__GET_DEVICES_INFO,
+ RAS_CMD__GET_BLOCK_ECC_STATUS,
+ RAS_CMD__INJECT_ERROR,
+ RAS_CMD__GET_BAD_PAGES,
+ RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ RAS_CMD__RESET_ALL_ERROR_COUNTS,
+ RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
+ RAS_CMD__TRANSLATE_FB_ADDRESS,
+ RAS_CMD__GET_LINK_TOPOLOGY,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ RAS_CMD__GET_CPER_RECORD,
+ RAS_CMD__GET_BATCH_TRACE_SNAPSHOT,
+ RAS_CMD__GET_BATCH_TRACE_RECORD,
+ RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END,
+};
+
+enum ras_cmd_response {
+ RAS_CMD__SUCCESS = 0,
+ RAS_CMD__SUCCESS_EXEED_BUFFER,
+ RAS_CMD__ERROR_UKNOWN_CMD,
+ RAS_CMD__ERROR_INVALID_CMD,
+ RAS_CMD__ERROR_VERSION,
+ RAS_CMD__ERROR_INVALID_INPUT_SIZE,
+ RAS_CMD__ERROR_INVALID_INPUT_DATA,
+ RAS_CMD__ERROR_DRV_INIT_FAIL,
+ RAS_CMD__ERROR_ACCESS_DENIED,
+ RAS_CMD__ERROR_GENERIC,
+ RAS_CMD__ERROR_TIMEOUT,
+};
+
+enum ras_error_type {
+ RAS_TYPE_ERROR__NONE = 0,
+ RAS_TYPE_ERROR__PARITY = 1,
+ RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TYPE_ERROR__POISON = 8,
+};
+
+struct ras_core_context;
+struct ras_cmd_ctx;
+
+struct ras_cmd_mgr {
+ struct list_head head;
+ struct ras_core_context *ras_core;
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_func_map {
+ uint32_t cmd_id;
+ int (*func)(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+};
+
+struct ras_device_bdf {
+ union {
+ struct {
+ uint32_t function : 3;
+ uint32_t device : 5;
+ uint32_t bus : 8;
+ uint32_t domain : 16;
+ };
+ uint32_t u32_all;
+ };
+};
+
+struct ras_cmd_param {
+ uint32_t idx_vf;
+ void *data;
+};
+
+#pragma pack(push, 8)
+struct ras_cmd_ctx {
+ uint32_t magic;
+ union {
+ struct {
+ uint16_t ras_cmd_minor_ver : 10;
+ uint16_t ras_cmd_major_ver : 6;
+ };
+ uint16_t ras_cmd_ver;
+ };
+ union {
+ struct {
+ uint16_t plat_major_ver : 10;
+ uint16_t plat_minor_ver : 6;
+ };
+ uint16_t plat_ver;
+ };
+ uint32_t cmd_id;
+ uint32_t cmd_res;
+ uint32_t input_size;
+ uint32_t output_size;
+ uint32_t output_buf_size;
+ uint32_t reserved[5];
+ uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE];
+ uint8_t output_buff_raw[];
+};
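+
+/* The command context is a fixed-size header followed by a caller-sized output
+ * area: input_buff_raw is bounded by RAS_CMD_MAX_IN_SIZE, while output_buff_raw
+ * is a flexible array; output_buf_size appears to carry the provided capacity
+ * and output_size the number of bytes actually produced (interpretation, not
+ * spelled out by this header).
+ */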
+
+struct ras_cmd_dev_handle {
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_block_ecc_info_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_block_ecc_info_rsp {
+ uint32_t version;
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t reserved[6];
+};
+
+struct ras_cmd_inject_error_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint64_t address;
+ uint32_t error_type;
+ uint32_t instance_mask;
+ union {
+ struct {
+ /* vf index */
+ uint64_t vf_idx : 6;
+ /* method of error injection, e.g. persistent, coherent, etc. */
+ uint64_t method : 10;
+ uint64_t rsv : 48;
+ };
+ uint64_t value;
+ };
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_inject_error_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ uint64_t address;
+};
+
+struct ras_cmd_dev_info {
+ uint64_t dev_handle;
+ uint32_t location_id;
+ uint32_t ecc_enabled;
+ uint32_t ecc_supported;
+ uint32_t vf_num;
+ uint32_t asic_type;
+ uint32_t oam_id;
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_devices_info_rsp {
+ uint32_t version;
+ uint32_t dev_num;
+ uint32_t reserved[6];
+ struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM];
+};
+
+struct ras_cmd_bad_page_record {
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+ uint64_t retired_page;
+ uint64_t ts;
+
+ uint32_t err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ unsigned char valid;
+ unsigned char reserved[8];
+};
+
+struct ras_cmd_bad_pages_info_req {
+ struct ras_cmd_dev_handle device;
+ uint32_t group_index;
+ uint32_t reserved[5];
+};
+
+struct ras_cmd_bad_pages_info_rsp {
+ uint32_t version;
+ uint32_t group_index;
+ uint32_t bp_in_group;
+ uint32_t bp_total_cnt;
+ uint32_t reserved[4];
+ struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+};
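+
+/* Bad pages are reported in groups of RAS_CMD_MAX_BAD_PAGES_PER_GROUP records;
+ * a caller would typically re-issue RAS_CMD__GET_BAD_PAGES with an increasing
+ * group_index until the groups cover the bp_total_cnt reported here (usage
+ * sketch, not mandated by this header).
+ */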
+
+struct ras_query_interface_info_req {
+ uint32_t reserved[8];
+};
+
+struct ras_query_interface_info_rsp {
+ uint32_t version;
+ uint32_t ras_cmd_major_ver;
+ uint32_t ras_cmd_minor_ver;
+ uint32_t plat_major_ver;
+ uint32_t plat_minor_ver;
+ uint8_t interface_type;
+ uint8_t rsv[3];
+ uint32_t reserved[8];
+};
+
+#define RAS_MAX_NUM_SAFE_RANGES 64
+struct ras_cmd_ras_safe_fb_address_ranges_rsp {
+ uint32_t version;
+ uint32_t num_ranges;
+ uint32_t reserved[4];
+ struct {
+ uint64_t start;
+ uint64_t size;
+ uint32_t idx;
+ uint32_t reserved[3];
+ } range[RAS_MAX_NUM_SAFE_RANGES];
+};
+
+enum ras_fb_addr_type {
+ RAS_FB_ADDR_SOC_PHY, /* SPA */
+ RAS_FB_ADDR_BANK,
+ RAS_FB_ADDR_VF_PHY, /* GPA */
+ RAS_FB_ADDR_UNKNOWN
+};
+
+struct ras_fb_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+ uint32_t reserved[3];
+};
+
+struct ras_fb_vf_phy_addr {
+ uint32_t vf_idx;
+ uint32_t reserved;
+ uint64_t addr;
+};
+
+union ras_translate_fb_address {
+ struct ras_fb_bank_addr bank_addr;
+ uint64_t soc_phy_addr;
+ struct ras_fb_vf_phy_addr vf_phy_addr;
+};
+
+struct ras_cmd_translate_fb_address_req {
+ struct ras_cmd_dev_handle dev;
+ enum ras_fb_addr_type src_addr_type;
+ enum ras_fb_addr_type dest_addr_type;
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_cmd_translate_fb_address_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_dev_link_topology_req {
+ struct ras_cmd_dev_handle src;
+ struct ras_cmd_dev_handle dst;
+};
+
+struct ras_dev_link_topology_rsp {
+ uint32_t version;
+ uint32_t link_status; /* HW status of the link */
+ uint32_t link_type; /* type of the link */
+ uint32_t num_hops; /* number of hops */
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_cper_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_cper_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ uint64_t latest_cper_id;
+};
+
+struct ras_cmd_cper_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t cper_start_id;
+ uint32_t cper_num;
+ uint32_t buf_size;
+ uint64_t buf_ptr;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_cper_record_rsp {
+ uint32_t version;
+ uint32_t real_data_size;
+ uint32_t real_cper_num;
+ uint32_t remain_num;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_batch_trace_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_batch_trace_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_batch_num;
+ uint64_t start_batch_id;
+ uint64_t latest_batch_id;
+};
+
+struct ras_cmd_batch_trace_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t start_batch_id;
+ uint32_t batch_num;
+ uint32_t reserved[5];
+};
+
+struct batch_ras_trace_info {
+ uint64_t batch_id;
+ uint16_t offset;
+ uint8_t trace_num;
+ uint8_t rsv;
+ uint32_t reserved;
+};
+
+#define RAS_CMD_MAX_BATCH_NUM 300
+#define RAS_CMD_MAX_TRACE_NUM 300
+struct ras_cmd_batch_trace_record_rsp {
+ uint32_t version;
+ uint16_t real_batch_num;
+ uint16_t remain_num;
+ uint64_t start_batch_id;
+ uint32_t reserved[2];
+ struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM];
+ struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM];
+};
+
+#pragma pack(pop)
+
+int ras_cmd_init(struct ras_core_context *ras_core);
+int ras_cmd_fini(struct ras_core_context *ras_core);
+int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data);
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core);
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp);
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr);
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
new file mode 100644
index 000000000000..01122b55c98a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+
+#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t))
+
+#define IS_LEAP_YEAR(x) ((((x) % 4 == 0) && ((x) % 100 != 0)) || ((x) % 400 == 0))
+
+static const char * const ras_block_name[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+ "mca",
+ "vcn",
+ "jpeg",
+ "ih",
+ "mpio",
+};
+
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id)
+{
+ if (block_id >= ARRAY_SIZE(ras_block_name))
+ return "";
+
+ return ras_block_name[block_id];
+}
+
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm)
+{
+ int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+ uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0;
+ uint32_t year = 0;
+ int seconds_per_day = 24 * 60 * 60;
+ int seconds_per_hour = 60 * 60;
+ int seconds_per_minute = 60;
+ int days, remaining_seconds;
+ uint64_t rem;
+
+ days = div64_u64_rem(timestamp, seconds_per_day, &rem);
+ remaining_seconds = rem;
+
+ /* The timestamp is in seconds since the Unix epoch */
+ year = 1970;
+ while (days >= 365) {
+ if (IS_LEAP_YEAR(year)) {
+ if (days < 366)
+ break;
+ days -= 366;
+ } else {
+ days -= 365;
+ }
+ year++;
+ }
+
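+ /* February gets one extra day in a leap year */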
+ days_in_month[1] += IS_LEAP_YEAR(year);
+
+ month = 0;
+ while (days >= days_in_month[month]) {
+ days -= days_in_month[month];
+ month++;
+ }
+ month++;
+ day = days + 1;
+
+ if (remaining_seconds) {
+ hour = remaining_seconds / seconds_per_hour;
+ minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute;
+ second = remaining_seconds % seconds_per_minute;
+ }
+
+ tm->tm_year = year;
+ tm->tm_mon = month;
+ tm->tm_mday = day;
+ tm->tm_hour = hour;
+ tm->tm_min = minute;
+ tm->tm_sec = second;
+
+ return 0;
+}
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IN_RESET) ? true : false;
+}
+
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IS_VF) ? true : false;
+}
+
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return false;
+
+ return ras_core->is_rma;
+}
+
+static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t seqno)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo)
+ ret = kfifo_in_spinlocked(seqno_fifo,
+ &seqno, sizeof(seqno), &ras_core->seqno_lock);
+
+ return ret ? 0 : -EINVAL;
+}
+
+static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo) {
+ if (pop)
+ ret = kfifo_out_spinlocked(seqno_fifo,
+ seqno, sizeof(*seqno), &ras_core->seqno_lock);
+ else
+ ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno));
+ }
+
+ return ret ? 0 : -EINVAL;
+}
+
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type type)
+{
+ uint64_t seqno = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->gen_seqno)
+ ras_core->sys_fn->gen_seqno(ras_core, type, &seqno);
+
+ return seqno;
+}
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno)
+{
+ int ret = 0;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return -EINVAL;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CREATION, seqno);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, seqno);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
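+/* Return a queued sequence number of @seqno_type, either peeking at or popping
+ * the head of the matching FIFO; if nothing is queued, fall back to generating
+ * a fresh sequence number through the system callback.
+ */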
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop)
+{
+ uint64_t seq_no;
+ int ret = -ENODATA;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return 0;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CREATION, &seq_no, pop);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop);
+
+ if (ret)
+ seq_no = ras_core_gen_seqno(ras_core, seqno_type);
+
+ return seq_no;
+}
+
+static int ras_core_eeprom_recovery(struct ras_core_context *ras_core)
+{
+ int count;
+ int ret;
+
+ count = ras_eeprom_get_record_count(ras_core);
+ if (!count)
+ return 0;
+
+ /* Avoid bad page to be loaded again after gpu reset */
+ if (ras_umc_get_saved_eeprom_count(ras_core) >= count)
+ return 0;
+
+ ret = ras_umc_load_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret);
+ return ret;
+ }
+
+ ras_eeprom_sync_info(ras_core);
+
+ return ret;
+}
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
+{
+ struct ras_core_context *ras_core;
+ struct ras_core_config *config;
+
+ ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL);
+ if (!ras_core)
+ return NULL;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ kfree(ras_core);
+ return NULL;
+ }
+
+ memcpy(config, init_config, sizeof(*config));
+ ras_core->config = config;
+
+ return ras_core;
+}
+
+void ras_core_destroy(struct ras_core_context *ras_core)
+{
+ if (ras_core)
+ kfree(ras_core->config);
+
+ kfree(ras_core);
+}
+
+int ras_core_sw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ if (!ras_core->config) {
+ RAS_DEV_ERR(ras_core->dev, "No ras core config!\n");
+ return -EINVAL;
+ }
+
+ ras_core->sys_fn = ras_core->config->sys_fn;
+ if (!ras_core->sys_fn)
+ return -EINVAL;
+
+ ret = kfifo_alloc(&ras_core->de_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = kfifo_alloc(&ras_core->consumption_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_core->seqno_lock);
+
+ ret = ras_aca_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_umc_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_cmd_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_log_ring_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_psp_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ras_core_sw_fini(struct ras_core_context *ras_core)
+{
+ kfifo_free(&ras_core->de_seqno_fifo);
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+
+ ras_psp_sw_fini(ras_core);
+ ras_log_ring_sw_fini(ras_core);
+ ras_cmd_fini(ras_core);
+ ras_umc_sw_fini(ras_core);
+ ras_aca_sw_fini(ras_core);
+
+ return 0;
+}
+
+int ras_core_hw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ras_core->ras_eeprom_supported =
+ ras_core->config->ras_eeprom_supported;
+
+ ras_core->poison_supported = ras_core->config->poison_supported;
+
+ ret = ras_psp_hw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_aca_hw_init(ras_core);
+ if (ret)
+ goto init_err1;
+
+ ret = ras_mp1_hw_init(ras_core);
+ if (ret)
+ goto init_err2;
+
+ ret = ras_nbio_hw_init(ras_core);
+ if (ret)
+ goto init_err3;
+
+ ret = ras_umc_hw_init(ras_core);
+ if (ret)
+ goto init_err4;
+
+ ret = ras_gfx_hw_init(ras_core);
+ if (ret)
+ goto init_err5;
+
+ ret = ras_eeprom_hw_init(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_core_eeprom_recovery(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to recover ras core, ret:%d\n", ret);
+ goto init_err6;
+ }
+
+ ret = ras_eeprom_check_storage_status(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_process_init(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ras_core->is_initialized = true;
+
+ return 0;
+
+init_err7:
+ ras_eeprom_hw_fini(ras_core);
+init_err6:
+ ras_gfx_hw_fini(ras_core);
+init_err5:
+ ras_umc_hw_fini(ras_core);
+init_err4:
+ ras_nbio_hw_fini(ras_core);
+init_err3:
+ ras_mp1_hw_fini(ras_core);
+init_err2:
+ ras_aca_hw_fini(ras_core);
+init_err1:
+ ras_psp_hw_fini(ras_core);
+ return ret;
+}
+
+int ras_core_hw_fini(struct ras_core_context *ras_core)
+{
+ ras_core->is_initialized = false;
+
+ ras_process_fini(ras_core);
+ ras_eeprom_hw_fini(ras_core);
+ ras_gfx_hw_fini(ras_core);
+ ras_nbio_hw_fini(ras_core);
+ ras_umc_hw_fini(ras_core);
+ ras_mp1_hw_fini(ras_core);
+ ras_aca_hw_fini(ras_core);
+ ras_psp_hw_fini(ras_core);
+
+ return 0;
+}
+
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data)
+{
+ return ras_nbio_handle_irq_error(ras_core, data);
+}
+
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
+{
+ int ret = 0;
+
+ ras_aca_mark_fatal_flag(ras_core);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL);
+
+ return ret;
+}
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
+{
+ if (ras_core->ras_nbio.ip_func &&
+ ras_core->ras_nbio.ip_func->get_memory_partition_mode)
+ return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n");
+ return 0;
+}
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL);
+ if (!ret)
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
+
+ return ret;
+}
+
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count)
+{
+ int ret;
+
+ if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core)
+ return -EINVAL;
+
+ ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
+ if (!ret)
+ ras_aca_clear_block_new_ecc_count(ras_core, block);
+
+ return ret;
+}
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
+{
+ ras_core->ras_core_enabled = enable;
+
+ return 0;
+}
+
+bool ras_core_is_enabled(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_core_enabled;
+}
+
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_utc_second_timestamp)
+ return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
+ return 0;
+}
+
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ if (!ras_core || !soc_pa || !bank_addr)
+ return -EINVAL;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
+}
+
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->detect_ras_interrupt)
+ return ras_core->sys_fn->detect_ras_interrupt(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
+ return false;
+}
+
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
+ return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n");
+ return -EACCES;
+}
+
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
+ return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n");
+ return -EACCES;
+}
+
+bool ras_core_is_ready(struct ras_core_context *ras_core)
+{
+ return ras_core ? ras_core->is_initialized : false;
+}
+
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ return ras_eeprom_check_safety_watermark(ras_core);
+}
+
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true);
+
+ return 1;
+}
+
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, true, false);
+}
+
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, false, false);
+}
+
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->ras_notifier)
+ return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
+
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_device_system_info)
+ return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
new file mode 100644
index 000000000000..2343991adccf
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE;
+static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC;
+static const struct ras_cper_guid BOOT = BOOT__TYPE;
+
+static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP;
+static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR;
+
+static void cper_get_timestamp(struct ras_core_context *ras_core,
+ struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp)
+{
+ struct ras_time tm = {0};
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = tm.tm_mon;
+ timestamp->year = tm.tm_year % 100;
+ timestamp->century = tm.tm_year / 100;
+}
+
+static void fill_section_hdr(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, enum ras_cper_type type,
+ enum ras_cper_severity sev, struct ras_log_info *trace)
+{
+ struct device_system_info dev_info = {0};
+ char record_id[16];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR__REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = sev;
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.partition_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp);
+
+ snprintf(record_id, 9, "%d:%llX", dev_info.socket_id,
+ RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ dev_info.vendor_id, dev_info.device_id);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU);
+
+ switch (type) {
+ case RAS_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ case RAS_CPER_TYPE_RMA:
+ hdr->notify_type = MCE;
+ break;
+ case RAS_CPER_TYPE_RUNTIME:
+ if (sev == RAS_CPER_SEV_NON_FATAL_CE)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n");
+ break;
+ }
+}
+
+static int fill_section_descriptor(struct ras_core_context *ras_core,
+ struct cper_section_descriptor *descriptor,
+ enum ras_cper_severity sev,
+ struct ras_cper_guid sec_type,
+ uint32_t section_offset,
+ uint32_t section_length)
+{
+ struct device_system_info dev_info = {0};
+
+ descriptor->revision_minor = CPER_SEC__MINOR_REV_1;
+ descriptor->revision_major = CPER_SEC__MAJOR_REV_22;
+ descriptor->sec_offset = section_offset;
+ descriptor->sec_length = section_length;
+ descriptor->valid_bits.fru_text = 1;
+ descriptor->flag_bits.primary = 1;
+ descriptor->severity = sev;
+ descriptor->sec_type = sec_type;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id);
+
+ if (sev == RAS_CPER_SEV_RMA)
+ descriptor->flag_bits.exceed_err_threshold = 1;
+
+ if (sev == RAS_CPER_SEV_NON_FATAL_UE)
+ descriptor->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+static int fill_section_fatal(struct ras_core_context *ras_core,
+ struct cper_section_fatal *fatal, struct ras_log_info *trace)
+{
+ fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ fatal->data.reg_arr_size = sizeof(fatal->data.reg);
+
+ fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS];
+ fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR];
+ fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID];
+ fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND];
+
+ return 0;
+}
+
+static int fill_section_runtime(struct ras_core_context *ras_core,
+ struct cper_section_runtime *runtime, struct ras_log_info *trace)
+{
+ runtime->hdr.valid_bits.err_info_cnt = 1;
+ runtime->hdr.valid_bits.err_context_cnt = 1;
+
+ runtime->descriptor.error_type = RUNTIME;
+ runtime->descriptor.ms_chk_bits.err_type_valid = 1;
+
+ runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump);
+
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND];
+
+ return 0;
+}
+
+static int cper_generate_runtime_record(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num,
+ enum ras_cper_severity sev)
+{
+ struct cper_section_descriptor *descriptor;
+ struct cper_section_runtime *runtime;
+ int i;
+
+ fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]);
+ hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num);
+ hdr->sec_cnt = arr_num;
+ for (i = 0; i < arr_num; i++) {
+ descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr +
+ RAS_SEC_DESC_OFFSET(i));
+ runtime = (struct cper_section_runtime *)((uint8_t *)hdr +
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i));
+
+ fill_section_descriptor(ras_core, descriptor, sev, RUNTIME,
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i),
+ sizeof(struct cper_section_runtime));
+ fill_section_runtime(ras_core, runtime, trace_arr[i]);
+ }
+
+ return 0;
+}
+
+static int cper_generate_fatal_record(struct ras_core_context *ras_core,
+ uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num)
+{
+ struct ras_cper_fatal_record record = {0};
+ int i = 0;
+
+ for (i = 0; i < arr_num; i++) {
+ fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL,
+ RAS_CPER_SEV_FATAL_UE, trace_arr[i]);
+ record.hdr.record_length = RAS_HDR_LEN + RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN;
+ record.hdr.sec_cnt = 1;
+
+ fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE,
+ CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal),
+ sizeof(struct cper_section_fatal));
+
+ fill_section_fatal(ras_core, &record.fatal, trace_arr[i]);
+
+ memcpy(buffer + (i * record.hdr.record_length),
+ &record, record.hdr.record_length);
+ }
+
+ return 0;
+}
+
+static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count)
+{
+ int size = 0;
+
+ size += RAS_HDR_LEN;
+ size += (RAS_SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case RAS_CPER_TYPE_RUNTIME:
+ case RAS_CPER_TYPE_RMA:
+ size += (RAS_NONSTD_SEC_LEN * section_count);
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ size += (RAS_FATAL_SEC_LEN * section_count);
+ size += (RAS_HDR_LEN * (section_count - 1));
+ break;
+ case RAS_CPER_TYPE_BOOT:
+ size += (RAS_BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ /* should never reach here */
+ break;
+ }
+
+ return size;
+}
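+
+/* Note that RAS_CPER_TYPE_FATAL emits one complete record (header, descriptor
+ * and fatal section) per entry, which is why the size above adds an extra
+ * header for every section beyond the first.
+ */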
+
+static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event)
+{
+ switch (event) {
+ case RAS_LOG_EVENT_UE:
+ return RAS_CPER_TYPE_FATAL;
+ case RAS_LOG_EVENT_DE:
+ case RAS_LOG_EVENT_CE:
+ case RAS_LOG_EVENT_POISON_CREATION:
+ case RAS_LOG_EVENT_POISON_CONSUMPTION:
+ return RAS_CPER_TYPE_RUNTIME;
+ case RAS_LOG_EVENT_RMA:
+ return RAS_CPER_TYPE_RMA;
+ default:
+ /* should never reach here */
+ return RAS_CPER_TYPE_RUNTIME;
+ }
+}
+
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len)
+{
+ uint8_t *buffer = buf;
+ uint64_t buf_size = buf_len;
+ int record_size, saved_size = 0;
+ struct cper_section_hdr *hdr;
+
+ /* All the batch traces share the same event */
+ record_size = cper_get_record_size(
+ cper_ras_log_event_to_cper_type(trace_list[0]->event), count);
+
+ if ((record_size + saved_size) > buf_size)
+ return -ENOMEM;
+
+ hdr = (struct cper_section_hdr *)(buffer + saved_size);
+
+ switch (trace_list[0]->event) {
+ case RAS_LOG_EVENT_RMA:
+ cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA);
+ break;
+ case RAS_LOG_EVENT_DE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE);
+ break;
+ case RAS_LOG_EVENT_CE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE);
+ break;
+ case RAS_LOG_EVENT_UE:
+ cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event);
+ break;
+ }
+
+ saved_size += record_size;
+
+ *real_data_len = saved_size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
new file mode 100644
index 000000000000..076c1883c1ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_CPER_H__
+#define __RAS_CPER_H__
+
+#define CPER_UUID_MAX_SIZE 16
+struct ras_cper_guid {
+ uint8_t b[CPER_UUID_MAX_SIZE];
+};
+
+#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ ((struct ras_cper_guid) \
+ {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+#define CPER_HDR__REV_1 (0x100)
+#define CPER_SEC__MINOR_REV_1 (0x01)
+#define CPER_SEC__MAJOR_REV_22 (0x22)
+#define CPER_OAM_MAX_COUNT (8)
+
+#define CPER_CTX_TYPE__CRASH (1)
+#define CPER_CTX_TYPE__BOOT (9)
+
+#define CPER_CREATOR_ID__AMDGPU "amdgpu"
+
+#define CPER_NOTIFY__MCE \
+ CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY__CMC \
+ CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT__TYPE \
+ CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define GPU__CRASHDUMP \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define GPU__NONSTANDARD_ERROR \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR__SECTION_TYPE \
+ CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum ras_cper_type {
+ RAS_CPER_TYPE_RUNTIME,
+ RAS_CPER_TYPE_FATAL,
+ RAS_CPER_TYPE_BOOT,
+ RAS_CPER_TYPE_RMA,
+};
+
+enum ras_cper_severity {
+ RAS_CPER_SEV_NON_FATAL_UE = 0,
+ RAS_CPER_SEV_FATAL_UE = 1,
+ RAS_CPER_SEV_NON_FATAL_CE = 2,
+ RAS_CPER_SEV_RMA = 3,
+
+ RAS_CPER_SEV_UNUSED = 10,
+};
+
+enum ras_cper_aca_reg {
+ RAS_CPER_ACA_REG_CTL = 0,
+ RAS_CPER_ACA_REG_STATUS = 1,
+ RAS_CPER_ACA_REG_ADDR = 2,
+ RAS_CPER_ACA_REG_MISC0 = 3,
+ RAS_CPER_ACA_REG_CONFIG = 4,
+ RAS_CPER_ACA_REG_IPID = 5,
+ RAS_CPER_ACA_REG_SYND = 6,
+ RAS_CPER_ACA_REG_DESTAT = 8,
+ RAS_CPER_ACA_REG_DEADDR = 9,
+ RAS_CPER_ACA_REG_MASK = 10,
+
+ RAS_CPER_ACA_REG_COUNT = 16,
+};
+
+#pragma pack(push, 1)
+
+struct ras_cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_section_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum ras_cper_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct ras_cper_timestamp timestamp;
+ char platform_id[16];
+ struct ras_cper_guid partition_id; /* Reserved */
+ char creator_id[16];
+ struct ras_cper_guid notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_section_descriptor {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ struct ras_cper_guid sec_type;
+ char fru_id[16];
+ enum ras_cper_severity severity;
+ char fru_text[20];
+};
+
+struct runtime_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct runtime_descriptor {
+ struct ras_cper_guid error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct runtime_error_reg {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT];
+};
+
+struct cper_section_runtime {
+ struct runtime_hdr hdr;
+ struct runtime_descriptor descriptor;
+ struct runtime_error_reg reg;
+};
+
+struct crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct fatal_reg_info {
+ uint64_t status;
+ uint64_t addr;
+ uint64_t ipid;
+ uint64_t synd;
+};
+
+struct crashdump_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct fatal_reg_info reg;
+};
+
+struct crashdump_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_OAM_MAX_COUNT];
+};
+
+struct cper_section_fatal {
+ struct crashdump_hdr hdr;
+ struct crashdump_fatal data;
+};
+
+struct cper_section_boot {
+ struct crashdump_hdr hdr;
+ struct crashdump_boot data;
+};
+
+struct ras_cper_fatal_record {
+ struct cper_section_hdr hdr;
+ struct cper_section_descriptor descriptor;
+ struct cper_section_fatal fatal;
+};
+#pragma pack(pop)
+
+#define RAS_HDR_LEN (sizeof(struct cper_section_hdr))
+#define RAS_SEC_DESC_LEN (sizeof(struct cper_sec_desc))
+
+#define RAS_BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot))
+#define RAS_FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal))
+#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err))
+
+#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * idx))
+
+#define RAS_BOOT_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_BOOT_SEC_LEN * idx))
+#define RAS_FATAL_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_FATAL_SEC_LEN * idx))
+#define RAS_NONSTD_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_NONSTD_SEC_LEN * idx))
+
+struct ras_core_context;
+struct ras_log_info;
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
new file mode 100644
index 000000000000..cd6b057bdaf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras_eeprom.h"
+#include "ras.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
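+
+/* For example, MAKE_I2C_ADDR(0x0) yields 0x50 and MAKE_I2C_ADDR(0x40000)
+ * yields 0x54, matching the addressing described above.
+ */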
+
+/*
+ * The two macros below give the actual size in bytes that these
+ * entities occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE is different from sizeof(struct eeprom_umc_record),
+ * which uses uint64 to store 6-byte fields such as retired_page.
+ */
+#define RAS_TABLE_HEADER_SIZE 20
+#define RAS_TABLE_RECORD_SIZE 24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL 0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD 0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* Assume 2-Mbit size EEPROM and take up the whole space. */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+ (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+ (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */
+#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+ (_C)->ras_max_record_count)
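+
+/* For example, with ras_fri = 5 and ras_max_record_count = 8, relative
+ * record index 4 maps to absolute index (4 + 5) % 8 = 1.
+ */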
+
+#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom))
+
+static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_eeprom_supported;
+}
+
+static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core,
+ struct ras_eeprom_control *control)
+{
+ int ret = -EINVAL;
+
+ if (control->sys_func &&
+ control->sys_func->update_eeprom_i2c_config)
+ ret = control->sys_func->update_eeprom_i2c_config(ras_core);
+ else
+ RAS_DEV_WARN(ras_core->dev,
+ "No eeprom i2c system config!\n");
+
+ return !ret ? true : false;
+}
+
+static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int ret;
+
+ if (control->sys_func && control->sys_func->eeprom_i2c_xfer) {
+ ret = control->sys_func->eeprom_i2c_xfer(ras_core,
+ eeprom_addr, eeprom_buf, buf_size, read);
+
+ if ((ret > 0) && !read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+
+ return ret;
+ }
+
+ RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n");
+ return -EINVAL;
+}
+
+static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
+
+ if (read)
+ limit = ras_core->ras_eeprom.max_read_len;
+ else
+ limit = ras_core->ras_eeprom.max_write_len;
+
+ if (limit && (limit <= EEPROM_OFFSET_SIZE)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+ eeprom_addr, buf_size,
+ read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ return -EINVAL;
+ }
+
+ ras_core_down_gpu_reset_lock(ras_core);
+
+ if (limit == 0) {
+ res = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, buf_size, read);
+ } else {
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = (buf_size < limit) ? buf_size : limit;
+
+ r = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ break;
+
+ res += r;
+ }
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return res;
+}
+
+static int __eeprom_read(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, true);
+}
+
+static int __eeprom_write(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, false);
+}
+
+static void
+__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void
+__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __write_table_header(struct ras_eeprom_control *control)
+{
+ u8 buf[RAS_TABLE_HEADER_SIZE];
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int res;
+
+ memset(buf, 0, sizeof(buf));
+ __encode_table_header_to_buf(&control->tbl_hdr, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table header:%d\n", res);
+ } else if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static void
+__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table ras info:%d\n", res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
+static u8 __calc_hdr_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ /* Header checksum, skip checksum field in the calculation */
+ sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+ pp = (u8 *) &control->tbl_hdr;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static int ras_eeprom_correct_header_tag(
+ struct ras_eeprom_control *control,
+ uint32_t header)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ u8 *hh;
+ int res;
+ u8 csum;
+
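+ /* The checksum is chosen so that the covered table bytes sum to zero
+ * (mod 256), so -checksum gives the sum of the other covered bytes;
+ * swap the old header tag bytes for the new ones and negate again to
+ * obtain the updated checksum.
+ */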
+ csum = -hdr->checksum;
+
+ hh = (void *) &hdr->header;
+ csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+ hh = (void *) &header;
+ csum += hh[0] + hh[1] + hh[2] + hh[3];
+ csum = -csum;
+ mutex_lock(&control->ras_tbl_mutex);
+ hdr->header = header;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void ras_set_eeprom_table_version(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ hdr->version = RAS_TABLE_VER_V3;
+}
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ u8 csum;
+ int res;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ hdr->header = RAS_TABLE_HDR_VAL;
+ ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = RAS_GPU_HEALTH_USABLE;
+ /*
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = control->record_threshold_count;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+
+ control->ras_num_recs = 0;
+ control->ras_fri = 0;
+
+ control->bad_channel_bitmap = 0;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+ control->update_channel_flag = false;
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
+ buf[i++] = record->err_type;
+
+ buf[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buf + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+ i += 6;
+
+ buf[i++] = record->mem_channel;
+ buf[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
+ record->err_type = buf[i++];
+
+ record->bank = buf[i++];
+
+ memcpy(&tmp, buf + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buf + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buf[i++];
+ record->mcumc_id = buf[i++];
+
+ memcpy(&tmp, buf + i, 6);
+ record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ bool ret = false;
+ int bad_page_count;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return false;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+ if (bad_page_count > control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+ ret = false;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consider adjusting the customized threshold.\n");
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * __ras_eeprom_write -- write indexed from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_write(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_write(ras_core,
+ control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Writing %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short write, return error. */
+ RAS_DEV_ERR(ras_core->dev,
+ "Wrote %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static int ras_eeprom_append_table(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ const u32 num)
+{
+ u32 a, b, i;
+ u8 *buf, *pp;
+ int res;
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Encode all of them in one go.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __encode_table_record_to_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+
+ /* a, first record index to write into.
+ * b, last record index to write into.
+ * a = first index to read (fri) + number of records in the table,
+ * b = a + @num - 1.
+ * Let N = control->ras_max_num_record_count, then we have,
+ * case 0: 0 <= a <= b < N,
+ * just append @num records starting at a;
+ * case 1: 0 <= a < N <= b,
+ * append (N - a) records starting at a, and
+ * append the remainder, b % N + 1, starting at 0.
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+ * case 2a: 0 <= a <= b < N
+ * append num records starting at a; and fix fri if b overwrote it,
+ * and since a <= b, if b overwrote it then a must've also,
+ * and if b didn't overwrite it, then a didn't also.
+ * case 2b: 0 <= b < a < N
+ * write num records starting at a, which wraps around 0=N
+ * and overwrite fri unconditionally. Now from case 2a,
+ * this means that b eclipsed fri to overwrite it and wrap
+ * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally
+ * set fri = b + 1 (mod N).
+ * Now, since fri is updated in every case, except the trivial case 0,
+ * the number of records present in the table after writing, is,
+ * num_recs - 1 = b - fri (mod N), and we take the positive value,
+ * by adding an arbitrary multiple of N before taking the modulo N
+ * as shown below.
+ */
+ a = control->ras_fri + control->ras_num_recs;
+ b = a + num - 1;
+ if (b < control->ras_max_record_count) {
+ res = __ras_eeprom_write(control, buf, a, num);
+ } else if (a < control->ras_max_record_count) {
+ u32 g0, g1;
+
+ g0 = control->ras_max_record_count - a;
+ g1 = b % control->ras_max_record_count + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ if (g1 > control->ras_fri)
+ control->ras_fri = g1 % control->ras_max_record_count;
+ } else {
+ a %= control->ras_max_record_count;
+ b %= control->ras_max_record_count;
+
+ if (a <= b) {
+ /* Note that, b - a + 1 = num. */
+ res = __ras_eeprom_write(control, buf, a, num);
+ if (res)
+ goto Out;
+ if (b >= control->ras_fri)
+ control->ras_fri = (b + 1) % control->ras_max_record_count;
+ } else {
+ u32 g0, g1;
+
+ /* b < a, which means, we write from
+ * a to the end of the table, and from
+ * the start of the table to b.
+ */
+ g0 = control->ras_max_record_count - a;
+ g1 = b + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ control->ras_fri = g1 % control->ras_max_record_count;
+ }
+ }
+ control->ras_num_recs = 1 +
+ (control->ras_max_record_count + b - control->ras_fri)
+ % control->ras_max_record_count;
+Out:
+ kfree(buf);
+ return res;
+}
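Despite the case analysis above, the net effect of ras_eeprom_append_table() on the two indices is simple: @ras_fri advances only by the number of oldest records that were overwritten, and @ras_num_recs saturates at the table capacity. A small user-space sketch of that invariant, with a hypothetical capacity N and a hypothetical helper name:

#include <stdio.h>

#define N 8U	/* hypothetical table capacity */

/* Illustrative sketch only -- not part of the driver. */
static void append_indices(unsigned int *fri, unsigned int *num_recs,
			   unsigned int num)
{
	unsigned int total = *num_recs + num;

	if (total > N) {
		/* (total - N) of the oldest records were overwritten */
		*fri = (*fri + total - N) % N;
		*num_recs = N;
	} else {
		*num_recs = total;
	}
}

int main(void)
{
	unsigned int fri = 0, num_recs = 6;

	append_indices(&fri, &num_recs, 4);		/* 6 + 4 > 8, wraps */
	printf("fri=%u num_recs=%u\n", fri, num_recs);	/* fri=2 num_recs=8 */
	return 0;
}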
+
+static int ras_eeprom_update_header(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int threshold_config = control->record_threshold_config;
+ u8 *buf, *pp, csum;
+ u32 buf_size;
+ int bad_page_count;
+ int res;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ /* Mark the table header bad if the bad page count exceeds the threshold.
+ */
+ if (threshold_config != 0 &&
+ bad_page_count > control->record_threshold_count) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ bad_page_count, control->record_threshold_count);
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
+
+ if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) &&
+ (threshold_config != NONSTOP_OVER_THRESHOLD))
+ ras_core->is_rma = true;
+
+ /* ignore the -ENOTSUPP return value */
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL);
+ }
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ control->tbl_hdr.checksum = 0;
+
+ buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
+ res = -ENOMEM;
+ goto Out;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_record_offset,
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM failed reading records:%d\n", res);
+ goto Out;
+ } else if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM read %d out of %d bytes\n", res, buf_size);
+ res = -EIO;
+ goto Out;
+ }
+
+ /*
+ * Bad page records have been stored in EEPROM;
+ * now calculate the GPU health percentage.
+ */
+ if (threshold_config != 0 &&
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ bad_page_count <= control->record_threshold_count)
+ control->tbl_rai.health_percent = ((control->record_threshold_count -
+ bad_page_count) * 100) / control->record_threshold_count;
+
+ /* Recalc the checksum.
+ */
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+
+ csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ /* avoid sign extension when assigning to "checksum" */
+ csum = -csum;
+ control->tbl_hdr.checksum = csum;
+ res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+Out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * ras_eeprom_append -- append records to the EEPROM RAS table
+ * @control: pointer to control structure
+ * @record: array of records to append
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, calculate the checksum and write
+ * the table back to EEPROM. The maximum number of records that
+ * can be appended is between 1 and control->ras_max_record_count,
+ * regardless of how many records are already stored in the table.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int res;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n");
+ return -EINVAL;
+ } else if ((num + control->ras_num_recs) > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = ras_eeprom_append_table(control, record, num);
+ if (!res)
+ res = ras_eeprom_update_header(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+/**
+ * __ras_eeprom_read -- read indexed from EEPROM into buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index, start reading at this index, absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_read(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Reading %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short read, return error.
+ */
+ RAS_DEV_ERR(ras_core->dev,
+ "Read %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int i, res;
+ u8 *buf, *pp;
+ u32 g0, g1;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_num_recs) {
+ RAS_DEV_ERR(ras_core->dev,
+ "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
+ return -EINVAL;
+ }
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Determine how many records to read, from the first record
+ * index, fri, to the end of the table, and from the beginning
+ * of the table, such that the total number of records is
+ * @num, and we handle wrap around when fri > 0 and
+ * fri + num > RAS_MAX_RECORD_COUNT.
+ *
+ * First we compute the index of the last element
+ * which would be fetched from each region,
+ * g0 is in [fri, fri + num - 1], and
+ * g1 is in [0, RAS_MAX_RECORD_COUNT - 1].
+ * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of
+ * the last element to fetch, we set g0 to _the number_
+ * of elements to fetch, @num, since we know that the last
+ * index to be fetched does not exceed the table.
+ *
+ * If, however, g0 >= RAS_MAX_RECORD_COUNT, then
+ * we set g0 to the number of elements to read
+ * until the end of the table, and g1 to the number of
+ * elements to read from the beginning of the table.
+ */
+ g0 = control->ras_fri + num - 1;
+ g1 = g0 % control->ras_max_record_count;
+ if (g0 < control->ras_max_record_count) {
+ g0 = num;
+ g1 = 0;
+ } else {
+ g0 = control->ras_max_record_count - control->ras_fri;
+ g1 += 1;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = __ras_eeprom_read(control, buf, control->ras_fri, g0);
+ if (res)
+ goto Out;
+ if (g1) {
+ res = __ras_eeprom_read(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ }
+
+ res = 0;
+
+ /* Everything has been read; now decode the records.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+Out:
+ kfree(buf);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
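The g0/g1 split computed in ras_eeprom_read() follows the comment above: g0 records are read starting at @ras_fri, and g1 records from the start of the table when the range wraps. A minimal sketch of just that arithmetic, for a hypothetical capacity N:

#include <stdio.h>

#define N 8U	/* hypothetical table capacity */

/* Illustrative sketch only -- not part of the driver. */
static void read_split(unsigned int fri, unsigned int num,
		       unsigned int *g0, unsigned int *g1)
{
	unsigned int last = fri + num - 1;	/* index of the last record to read */

	if (last < N) {
		*g0 = num;			/* no wrap-around */
		*g1 = 0;
	} else {
		*g0 = N - fri;			/* up to the end of the table */
		*g1 = last % N + 1;		/* remainder from the start */
	}
}

int main(void)
{
	unsigned int g0, g1;

	read_split(6, 5, &g0, &g1);		/* fri=6, num=5 wraps around */
	printf("g0=%u g1=%u\n", g0, g1);	/* g0=2 g1=3 */
	return 0;
}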
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+
+ /* get available eeprom table version first before eeprom table init */
+ ras_set_eeprom_table_version(control);
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
+}
+
+/**
+ * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+ * @control: pointer to control structure
+ *
+ * Check the checksum of the RAS table stored in EEPROM.
+ *
+ * Return 0 if the checksum is correct,
+ * positive if it is not correct, and
+ * -errno on I/O error.
+ */
+static int __verify_ras_table_checksum(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int buf_size, res;
+ u8 csum, *buf, *pp;
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Out of memory checking RAS table checksum.\n");
+ return -ENOMEM;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, buf_size);
+ if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Partial read for checksum, res:%d\n", res);
+ /* On partial reads, return -EIO.
+ */
+ if (res >= 0)
+ res = -EIO;
+ goto Out;
+ }
+
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+Out:
+ kfree(buf);
+ return res < 0 ? res : csum;
+}
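The verification above relies on how the checksum is produced in ras_eeprom_reset_table() and ras_eeprom_update_header(): the stored checksum byte is the two's complement of the byte-wise sum of everything else, so summing the whole table including the checksum must give 0 modulo 256. A user-space sketch of that property, with made-up data and a hypothetical checksum position:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only -- not part of the driver. */
static uint8_t byte_sum(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	uint8_t table[32] = { 0x5a, 0x01, 0x02, 0x03 };	/* rest is zero */

	table[31] = 0;				/* checksum byte not set yet */
	table[31] = -byte_sum(table, sizeof(table));

	/* A verifier summing the whole table, checksum included, gets 0. */
	printf("verify sum = %u\n", byte_sum(table, sizeof(table)));
	return 0;
}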
+
+static int __read_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * EEPROM table V2_1 and later carry a RAS info block;
+ * read it from the EEPROM.
+ */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table ras info, res:%d\n", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
+static int __check_ras_table_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+ struct ras_eeprom_table_header *hdr;
+ int res;
+
+ hdr = &control->tbl_hdr;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ control->ras_header_offset = RAS_HDR_START;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
+ mutex_init(&control->ras_tbl_mutex);
+
+ /* Read the table header from EEPROM address */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table header, res:%d\n", res);
+ return res >= 0 ? -EIO : res;
+ }
+
+ __decode_table_header_from_buf(hdr, buf);
+
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table");
+ return ras_eeprom_reset_table(ras_core);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ break;
+ case RAS_TABLE_VER_V1:
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+
+ return 0;
+}
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr;
+ int bad_page_count;
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ hdr = &control->tbl_hdr;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (hdr->header == RAS_TABLE_HDR_VAL) {
+ RAS_DEV_INFO(ras_core->dev,
+ "Found existing EEPROM table with %d records\n",
+ bad_page_count);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS table incorrect checksum or error:%d\n", res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * bad_page_count >= 9 * control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS records:%u exceeds 90%% of threshold:%d\n",
+ bad_page_count,
+ control->record_threshold_count);
+
+ } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+ control->record_threshold_config != 0) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS Table incorrect checksum or error:%d\n", res);
+
+ if (control->record_threshold_count >= bad_page_count) {
+ /* This means the threshold was increased since the last
+ * time the system was booted, and now
+ * control->record_threshold_count - bad_page_count >= 0,
+ * so the bad page count no longer exceeds the threshold and
+ * the table header signature can be restored.
+ */
+ RAS_DEV_INFO(ras_core->dev,
+ "records:%d threshold:%d, resetting RAS table header signature",
+ bad_page_count,
+ control->record_threshold_count);
+ res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ res = 0;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_ERR(ras_core->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+ }
+ }
+
+ return res < 0 ? res : 0;
+}
+
+int ras_eeprom_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+ struct ras_eeprom_config *eeprom_cfg;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ ras_core->is_rma = false;
+
+ control = &ras_core->ras_eeprom;
+
+ memset(control, 0, sizeof(*control));
+
+ eeprom_cfg = &ras_core->config->eeprom_cfg;
+ control->record_threshold_config =
+ eeprom_cfg->eeprom_record_threshold_config;
+
+ control->record_threshold_count = ras_eeprom_max_record_count(ras_core);
+ if (eeprom_cfg->eeprom_record_threshold_count <
+ control->record_threshold_count)
+ control->record_threshold_count =
+ eeprom_cfg->eeprom_record_threshold_count;
+
+ control->sys_func = eeprom_cfg->eeprom_sys_fn;
+ control->max_read_len = eeprom_cfg->max_i2c_read_len;
+ control->max_write_len = eeprom_cfg->max_i2c_write_len;
+ control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter;
+ control->i2c_port = eeprom_cfg->eeprom_i2c_port;
+ control->i2c_address = eeprom_cfg->eeprom_i2c_addr;
+
+ control->update_channel_flag = false;
+
+ return __check_ras_table_status(ras_core);
+}
+
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ control = &ras_core->ras_eeprom;
+ mutex_destroy(&control->ras_tbl_mutex);
+
+ return 0;
+}
+
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return 0;
+
+ return ras_core->ras_eeprom.ras_num_recs;
+}
+
+void ras_eeprom_sync_info(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return;
+
+ control = &ras_core->ras_eeprom;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+}
+
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return RAS_GPU_HEALTH_NONE;
+
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD)
+ return RAS_GPU_IN_BAD_STATUS;
+
+ return rai->rma_status;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
new file mode 100644
index 000000000000..2abe566c18b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_EEPROM_H__
+#define __RAS_EEPROM_H__
+#include "ras_sys.h"
+
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
+
+#define NONSTOP_OVER_THRESHOLD -2
+#define WARN_NONSTOP_OVER_THRESHOLD -1
+#define DISABLE_RETIRE_PAGE 0
+
+/*
+ * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0],
+ * nps mode: eeprom_umc_record.retired_row_pfn[47:40]
+ */
+#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL
+#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL
+#define EEPROM_RECORD_UMC_NPS_SHIFT 40
+
+#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \
+ (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \
+ EEPROM_RECORD_UMC_NPS_SHIFT)
+
+#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \
+ ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK)
+
+#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \
+do { \
+ uint64_t tmp = (NPS); \
+ tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \
+ tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \
+ (RECORD)->retired_row_pfn = tmp; \
+} while (0)
+
+enum ras_gpu_health_status {
+ RAS_GPU_HEALTH_NONE = 0,
+ RAS_GPU_HEALTH_USABLE = 1,
+ RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+ RAS_GPU_IN_BAD_STATUS = 3,
+};
+
+enum ras_eeprom_err_type {
+ RAS_EEPROM_ERR_NA,
+ RAS_EEPROM_ERR_RECOVERABLE,
+ RAS_EEPROM_ERR_NON_RECOVERABLE,
+ RAS_EEPROM_ERR_COUNT,
+};
+
+struct ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+} __packed;
+
+struct ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
+struct ras_eeprom_control {
+ struct ras_eeprom_table_header tbl_hdr;
+ struct ras_eeprom_table_ras_info tbl_rai;
+
+ /* record threshold */
+ int record_threshold_config;
+ uint32_t record_threshold_count;
+ bool update_channel_flag;
+
+ const struct ras_eeprom_sys_func *sys_func;
+ void *i2c_adapter;
+ u32 i2c_port;
+ u16 max_read_len;
+ u16 max_write_len;
+
+ /* Base I2C EEPROM 19-bit memory address,
+ * where the table is located. For more information,
+ * see top of amdgpu_eeprom.c.
+ */
+ u32 i2c_address;
+
+ /* The byte offset off of @i2c_address
+ * where the table header is found,
+ * and where the records start--always
+ * right after the header.
+ */
+ u32 ras_header_offset;
+ u32 ras_info_offset;
+ u32 ras_record_offset;
+
+ /* Number of records in the table.
+ */
+ u32 ras_num_recs;
+
+ /* First record index to read, 0-based.
+ * Range is [0, num_recs-1]. This is
+ * an absolute index, starting right after
+ * the table header.
+ */
+ u32 ras_fri;
+
+ /* Maximum possible number of records
+ * we could store, i.e. the maximum capacity
+ * of the table.
+ */
+ u32 ras_max_record_count;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
+ /* Bitmap of memory channels on which bad pages have occurred.
+ */
+ u32 bad_channel_bitmap;
+};
+
+/*
+ * Represents a single table record. Packed to be easily serialized into a
+ * byte stream.
+ */
+struct eeprom_umc_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_row_pfn;
+ uint64_t ts;
+
+ enum ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ /* The following variables will not be saved to eeprom.
+ */
+ uint64_t cur_nps_retired_row_pfn;
+ uint32_t cur_nps_bank;
+ uint32_t cur_nps;
+};
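The retired_row_pfn field doubles as storage for the NPS mode, as described by the mask/shift macros above. A small sketch of how they pack and unpack (assuming this header is included; the values are made up):

/* Illustrative sketch only -- not part of the driver. */
static inline void example_pack_pfn_and_nps(void)
{
	struct eeprom_umc_record rec = { 0 };

	EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(&rec, 0x12345678abULL, 4);
	/* rec.retired_row_pfn              == 0x0412345678ab */
	/* EEPROM_RECORD_UMC_ADDR_PFN(&rec) == 0x12345678ab   */
	/* EEPROM_RECORD_UMC_NPS_MODE(&rec) == 4              */
}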
+
+struct ras_core_context;
+int ras_eeprom_hw_init(struct ras_core_context *ras_core);
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core);
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core);
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core);
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core);
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core);
+void ras_eeprom_sync_info(struct ras_core_context *ras_core);
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core);
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core);
+#endif
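Taken together, the API above is meant to be driven by a higher-level RAS core: initialize once, append records as bad pages are found, then check whether the retirement threshold has been crossed. A hedged sketch of that sequence (example_record_bad_page() is hypothetical; a fully configured ras_core_context is assumed to come from elsewhere):

/* Illustrative sketch only -- not part of the driver. */
static int example_record_bad_page(struct ras_core_context *ras_core,
				   uint64_t pfn, uint32_t nps)
{
	struct eeprom_umc_record rec = { 0 };
	int res;

	rec.err_type = RAS_EEPROM_ERR_NON_RECOVERABLE;
	EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(&rec, pfn, nps);

	res = ras_eeprom_append(ras_core, &rec, 1);
	if (res)
		return res;

	/* true means the bad-page threshold was crossed and the device
	 * should be treated as needing service.
	 */
	if (ras_eeprom_check_safety_watermark(ras_core))
		return -EIO;

	return 0;
}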
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
new file mode 100644
index 000000000000..f5ce28777705
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_gfx.h"
+#include "ras_core_status.h"
+
+static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ return &gfx_ras_func_v9_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "GFX ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ return gfx->ip_func->get_ta_subblock(ras_core,
+ error_type, subblock, ta_subblock);
+}
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ gfx->gfx_ip_version = ras_core->config->gfx_ip_version;
+
+ gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version);
+
+ return gfx->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_gfx_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
new file mode 100644
index 000000000000..8a42d69fb0ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_H__
+#define __RAS_GFX_H__
+
+struct ras_gfx_ip_func {
+ int (*get_ta_subblock)(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+};
+
+struct ras_gfx {
+ uint32_t gfx_ip_version;
+ const struct ras_gfx_ip_func *ip_func;
+};
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core);
+int ras_gfx_hw_fini(struct ras_core_context *ras_core);
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
new file mode 100644
index 000000000000..6213d3f125be
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_core_status.h"
+
+enum ta_gfx_v9_subblock {
+ /*CPC*/
+ TA_GFX_V9__GFX_CPC_INDEX_START = 0,
+ TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START,
+ TA_GFX_V9__GFX_CPC_UCODE,
+ TA_GFX_V9__GFX_DC_STATE_ME1,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME1,
+ TA_GFX_V9__GFX_DC_RESTORE_ME1,
+ TA_GFX_V9__GFX_DC_STATE_ME2,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME2,
+ TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF*/
+ TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME1,
+ TA_GFX_V9__GFX_CPF_TAG,
+ TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG,
+ /* CPG*/
+ TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_TAG,
+ TA_GFX_V9__GFX_CPG_TAG,
+ TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG,
+ /* GDS*/
+ TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI*/
+ TA_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ*/
+ TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_LDS_D,
+ TA_GFX_V9__GFX_SQ_LDS_I,
+ TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/
+ TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges)*/
+ TA_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0*/
+ TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START,
+ TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ TA_GFX_V9__GFX_SQC_INDEX0_START,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_INDEX0_END =
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1*/
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX1_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2*/
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX2_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA*/
+ TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_AFIFO,
+ TA_GFX_V9__GFX_TA_FL_LFIFO,
+ TA_GFX_V9__GFX_TA_FX_LFIFO,
+ TA_GFX_V9__GFX_TA_FS_CFIFO,
+ TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA*/
+ TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges)*/
+ TA_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0*/
+ TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1*/
+ TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ TA_GFX_V9__GFX_TCC_INDEX1_END =
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2*/
+ TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ TA_GFX_V9__GFX_TCC_WRITE_RETURN,
+ TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ TA_GFX_V9__GFX_TCC_INDEX2_END =
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3*/
+ TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_INDEX3_END =
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4*/
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX4_END =
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI*/
+ TA_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP*/
+ TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_LFIFO_RAM,
+ TA_GFX_V9__GFX_TCP_CMD_FIFO,
+ TA_GFX_V9__GFX_TCP_VM_FIFO,
+ TA_GFX_V9__GFX_TCP_DB_RAM,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ TA_GFX_V9__GFX_TCP_INDEX_END = TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD*/
+ TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_HI,
+ TA_GFX_V9__GFX_TD_CS_FIFO,
+ TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges)*/
+ TA_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0*/
+ TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_RRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_WRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1*/
+ TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_IORD_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2*/
+ TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D1MEM,
+ TA_GFX_V9__GFX_EA_MAM_D2MEM,
+ TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank*/
+ TA_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker*/
+ TA_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ TA_GFX_V9__GFX_MAX
+};
+
+struct ras_gfx_subblock_t {
+ unsigned char *name;
+ int ta_subblock;
+ int hw_supported_error_type;
+ int sw_supported_error_type;
+};
+
+#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
+ [RAS_GFX_V9__##subblock] = { \
+ #subblock, \
+ TA_GFX_V9__##subblock, \
+ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
+ (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
+ }
+
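For orientation, the first four flags (a..d) of RAS_GFX_SUB_BLOCK() populate hw_supported_error_type as bits 0..3, and the last four (e..h) populate sw_supported_error_type as g | (e << 1) | (h << 2) | (f << 3). A worked expansion of one entry (illustrative only, not generated output):

/* RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1) expands to: */
[RAS_GFX_V9__GFX_CPC_SCRATCH] = {
	"GFX_CPC_SCRATCH",
	TA_GFX_V9__GFX_CPC_SCRATCH,
	0xe,	/* 0 | (1 << 1) | (1 << 2) | (1 << 3) */
	0x6,	/* (1 << 1) | (0 << 3) | 0 | (1 << 2) */
},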
+const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = {
+ RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
+
+static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ const struct ras_gfx_subblock_t *gfx_subblock;
+
+ if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks))
+ return -EINVAL;
+
+ gfx_subblock = &ras_gfx_v9_0_subblocks[subblock];
+ if (!gfx_subblock->name)
+ return -EPERM;
+
+ if (!(gfx_subblock->hw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ if (!(gfx_subblock->sw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ *ta_subblock = gfx_subblock->ta_subblock;
+
+ return 0;
+}
+
+const struct ras_gfx_ip_func gfx_ras_func_v9_0 = {
+ .get_ta_subblock = gfx_v9_0_get_ta_subblock,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
new file mode 100644
index 000000000000..659b56619747
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_V9_0_H__
+#define __RAS_GFX_V9_0_H__
+
+enum ras_gfx_v9_subblock {
+ /* CPC */
+ RAS_GFX_V9__GFX_CPC_INDEX_START = 0,
+ RAS_GFX_V9__GFX_CPC_SCRATCH =
+ RAS_GFX_V9__GFX_CPC_INDEX_START,
+ RAS_GFX_V9__GFX_CPC_UCODE,
+ RAS_GFX_V9__GFX_DC_STATE_ME1,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME1,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME1,
+ RAS_GFX_V9__GFX_DC_STATE_ME2,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME2,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ RAS_GFX_V9__GFX_CPC_INDEX_END =
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME2 =
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME1,
+ RAS_GFX_V9__GFX_CPF_TAG,
+ RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG,
+ /* CPG */
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_ROQ =
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_TAG,
+ RAS_GFX_V9__GFX_CPG_TAG,
+ RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG,
+ /* GDS */
+ RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ RAS_GFX_V9__GFX_GDS_INDEX_END =
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ RAS_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ */
+ RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_LDS_D,
+ RAS_GFX_V9__GFX_SQ_LDS_I,
+ RAS_GFX_V9__GFX_SQ_VGPR,
+ RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ RAS_GFX_V9__GFX_SQC_INDEX0_START =
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ RAS_GFX_V9__GFX_SQC_INDEX0_START,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_INDEX0_END =
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX1_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX2_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX_END =
+ RAS_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA */
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_DFIFO =
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_AFIFO,
+ RAS_GFX_V9__GFX_TA_FL_LFIFO,
+ RAS_GFX_V9__GFX_TA_FX_LFIFO,
+ RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA */
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_HOLE_FIFO =
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ RAS_GFX_V9__GFX_TCA_INDEX_END =
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ RAS_GFX_V9__GFX_TCC_INDEX0_START =
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX0_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_INDEX0_END =
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_DEC =
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ RAS_GFX_V9__GFX_TCC_INDEX1_END =
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ RAS_GFX_V9__GFX_TCC_WRITE_RETURN,
+ RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ RAS_GFX_V9__GFX_TCC_INDEX2_END =
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO =
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_INDEX3_END =
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX4_END =
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX_END =
+ RAS_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI */
+ RAS_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_CACHE_RAM =
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_LFIFO_RAM,
+ RAS_GFX_V9__GFX_TCP_CMD_FIFO,
+ RAS_GFX_V9__GFX_TCP_VM_FIFO,
+ RAS_GFX_V9__GFX_TCP_DB_RAM,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ RAS_GFX_V9__GFX_TCP_INDEX_END =
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_LO =
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_HI,
+ RAS_GFX_V9__GFX_TD_CS_FIFO,
+ RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ RAS_GFX_V9__GFX_EA_INDEX0_START =
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM =
+ RAS_GFX_V9__GFX_EA_INDEX0_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_RRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_WRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_INDEX0_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM =
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_IORD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_INDEX1_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D0MEM =
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D1MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D2MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX2_END =
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX_END =
+ RAS_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ RAS_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ RAS_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ RAS_GFX_V9__GFX_MAX
+};
+
+extern const struct ras_gfx_ip_func gfx_ras_func_v9_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
new file mode 100644
index 000000000000..d0621464f1a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+
+#define RAS_LOG_MAX_QUERY_SIZE 0xC000
+#define RAS_LOG_MEM_TEMP_SIZE 0x200
+#define RAS_LOG_MEMPOOL_SIZE \
+ (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE)
+
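+/* A log tree index packs the batch id in the upper bits and the per-batch
+ * sub-sequence number in the low 8 bits.
+ */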
+#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn))
+
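+/* Synthetic ACA register template recorded for RAS_LOG_EVENT_RMA entries. */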
+static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = {
+ [ACA_REG_IDX__CTL] = 0x1,
+ [ACA_REG_IDX__STATUS] = 0xB000000000000137,
+ [ACA_REG_IDX__ADDR] = 0x0,
+ [ACA_REG_IDX__MISC0] = 0x0,
+ [ACA_REG_IDX__CONFG] = 0x1ff00000002,
+ [ACA_REG_IDX__IPID] = 0x9600000000,
+ [ACA_REG_IDX__SYND] = 0x0,
+};
+
+static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t count = 0;
+
+ if (log_ring->logged_ecc_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count should not less than 0!\n");
+ count = 0;
+ } else {
+ count = log_ring->logged_ecc_count;
+ }
+
+ if (count > RAS_LOG_MEMPOOL_SIZE)
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count is out of range!\n");
+
+ return count;
+}
+
+static int ras_log_ring_add_data(struct ras_core_context *ras_core,
+ struct ras_log_info *log, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Invalid batch sub seqno:%d, batch:0x%llx\n",
+ batch_tag->sub_seqno, batch_tag->batch_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ if (batch_tag) {
+ log->seqno =
+ BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno);
+ batch_tag->sub_seqno++;
+ } else {
+ log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0);
+ log_ring->mono_upward_batch_id++;
+ }
+ ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log);
+ if (!ret)
+ log_ring->logged_ecc_count++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to add ras log! seqno:0x%llx, ret:%d\n",
+ log->seqno, ret);
+ mempool_free(log, log_ring->ras_log_mempool);
+ }
+
+ return ret;
+}
+
+static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ uint32_t i = 0, j = 0;
+ uint64_t batch_id, idx;
+ void *data;
+ int ret = -ENODATA;
+
+ if (count > ras_log_ring_get_logged_ecc_count(ras_core))
+ return -EINVAL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ i++;
+ }
+ }
+ batch_id = ++log_ring->last_del_batch_id;
+ if (i >= count) {
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return ret;
+}
+
+static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t batch_id, idx;
+ unsigned long flags = 0;
+ void *data;
+ int j;
+
+ if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) &&
+ !log_ring->logged_ecc_count)
+ return;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ }
+ }
+ batch_id++;
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+}
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ memset(log_ring, 0, sizeof(*log_ring));
+
+ log_ring->ras_log_mempool = mempool_create_kmalloc_pool(
+ RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info));
+ if (!log_ring->ras_log_mempool)
+ return -ENOMEM;
+
+ INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL);
+
+ spin_lock_init(&log_ring->spin_lock);
+
+ return 0;
+}
+
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ ras_log_ring_clear_log_tree(ras_core);
+ log_ring->logged_ecc_count = 0;
+ log_ring->last_del_batch_id = 0;
+ log_ring->mono_upward_batch_id = 0;
+
+ mempool_destroy(log_ring->ras_log_mempool);
+
+ return 0;
+}
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct ras_log_batch_tag *batch_tag;
+ unsigned long flags = 0;
+
+ batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL);
+ if (!batch_tag)
+ return NULL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_tag->batch_id = log_ring->mono_upward_batch_id;
+ log_ring->mono_upward_batch_id++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ batch_tag->sub_seqno = 0;
+ batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ return batch_tag;
+}
+
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *batch_tag)
+{
+ kfree(batch_tag);
+}
+
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct ras_log_info *log;
+ void *obj;
+
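+ /*
+ * If the pool is exhausted or the logged count has reached capacity,
+ * drop the oldest RAS_LOG_MEM_TEMP_SIZE records and retry the
+ * allocation once.
+ */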
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ if (!obj ||
+ (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) {
+ ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE);
+ if (!obj)
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ }
+
+ if (!obj) {
+ RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n");
+ return;
+ }
+
+ log = (struct ras_log_info *)obj;
+
+ memset(log, 0, sizeof(*log));
+ log->timestamp =
+ batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core);
+ log->event = event;
+
+ if (data)
+ memcpy(&log->aca_reg, data, sizeof(log->aca_reg));
+
+ if (event == RAS_LOG_EVENT_RMA)
+ memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg));
+
+ ras_log_ring_add_data(ras_core, log, batch_tag);
+}
+
+static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core,
+ uint64_t idx)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ void *data;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ data = radix_tree_lookup(&log_ring->ras_log_root, idx);
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return (struct ras_log_info *)data;
+}
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id,
+ struct ras_log_info **log_arr, uint32_t arr_num)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint32_t i, count = 0;
+ uint64_t idx;
+ void *data;
+
+ if ((batch_id >= log_ring->mono_upward_batch_id) ||
+ (batch_id < log_ring->last_del_batch_id))
+ return -EINVAL;
+
+ for (i = 0; i < MAX_RECORD_PER_BATCH; i++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, i);
+ data = ras_log_ring_lookup_data(ras_core, idx);
+ if (data) {
+ log_arr[count++] = data;
+ if (count >= arr_num)
+ break;
+ }
+ }
+
+ return count;
+}
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ overview->logged_batch_count =
+ log_ring->mono_upward_batch_id - log_ring->last_del_batch_id;
+ overview->last_batch_id = log_ring->mono_upward_batch_id;
+ overview->first_batch_id = log_ring->last_del_batch_id;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
new file mode 100644
index 000000000000..0ff6cc35678d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_LOG_RING_H__
+#define __RAS_LOG_RING_H__
+#include "ras_aca.h"
+
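+/* A batch groups up to 32 log records; the seqno encodes the batch id in the
+ * upper bits and the record index within the batch in the low 8 bits.
+ */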
+#define MAX_RECORD_PER_BATCH 32
+
+#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8)
+
+enum ras_log_event {
+ RAS_LOG_EVENT_NONE,
+ RAS_LOG_EVENT_UE,
+ RAS_LOG_EVENT_DE,
+ RAS_LOG_EVENT_CE,
+ RAS_LOG_EVENT_POISON_CREATION,
+ RAS_LOG_EVENT_POISON_CONSUMPTION,
+ RAS_LOG_EVENT_RMA,
+ RAS_LOG_EVENT_COUNT_MAX,
+};
+
+struct ras_aca_reg {
+ uint64_t regs[ACA_REG_MAX_COUNT];
+};
+
+struct ras_log_info {
+ uint64_t seqno;
+ uint64_t timestamp;
+ enum ras_log_event event;
+ union {
+ struct ras_aca_reg aca_reg;
+ };
+};
+
+struct ras_log_batch_tag {
+ uint64_t batch_id;
+ uint64_t timestamp;
+ uint32_t sub_seqno;
+};
+
+struct ras_log_ring {
+ void *ras_log_mempool;
+ struct radix_tree_root ras_log_root;
+ spinlock_t spin_lock;
+ uint64_t mono_upward_batch_id;
+ uint64_t last_del_batch_id;
+ int logged_ecc_count;
+};
+
+struct ras_log_batch_overview {
+ uint64_t first_batch_id;
+ uint64_t last_batch_id;
+ uint32_t logged_batch_count;
+};
+
+struct ras_core_context;
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core);
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core);
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core);
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *tag);
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *tag);
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx,
+ struct ras_log_info **log_arr, uint32_t arr_num);
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
new file mode 100644
index 000000000000..f3321df85021
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_mp1_v13_0.h"
+
+static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &mp1_ras_func_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "MP1 ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->get_valid_bank_count(ras_core, type, count);
+}
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val);
+}
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ mp1->mp1_ip_version = ras_core->config->mp1_ip_version;
+ mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn;
+ if (!mp1->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version);
+
+ return mp1->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_mp1_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
new file mode 100644
index 000000000000..de1d08286f41
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_H__
+#define __RAS_MP1_H__
+#include "ras.h"
+
+enum ras_err_type;
+struct ras_mp1_ip_func {
+ int (*get_valid_bank_count)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+ int (*dump_valid_bank)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_mp1 {
+ uint32_t mp1_ip_version;
+ const struct ras_mp1_ip_func *ip_func;
+ const struct ras_mp1_sys_func *sys_func;
+};
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core);
+int ras_mp1_hw_fini(struct ras_core_context *ras_core);
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 ecc_type, u32 idx, u32 reg_idx, u64 *val);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
new file mode 100644
index 000000000000..310d39fc816b
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_core_status.h"
+#include "ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCount 0x36
+#define RAS_MP1_MSG_McaBankDumpDW 0x37
+#define RAS_MP1_MSG_ClearMcaOnRead 0x39
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+#define MAX_UE_BANKS_PER_QUERY 12
+#define MAX_CE_BANKS_PER_QUERY 12
+
+static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ uint32_t bank_count = 0;
+ u32 msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!sys_func || !sys_func->mp1_get_valid_bank_count)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_QueryValidMcaCount;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count);
+ if (!ret) {
+ if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) ||
+ ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY)))
+ return -EINVAL;
+
+ *count = bank_count;
+ }
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ u32 msg;
+
+ if (!sys_func || !sys_func->mp1_dump_valid_bank)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_McaBankDumpDW;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val);
+}
+
+const struct ras_mp1_ip_func mp1_ras_func_v13_0 = {
+ .get_valid_bank_count = mp1_v13_0_get_bank_count,
+ .dump_valid_bank = mp1_v13_0_dump_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
new file mode 100644
index 000000000000..2edfdb5f6a75
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_V13_0_H__
+#define __RAS_MP1_V13_0_H__
+#include "ras_mp1.h"
+
+extern const struct ras_mp1_ip_func mp1_ras_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
new file mode 100644
index 000000000000..8bf1f35d595e
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio.h"
+#include "ras_nbio_v7_9.h"
+
+static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(7, 9, 0):
+ return &ras_nbio_v7_9;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "NBIO ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ nbio->nbio_ip_version = ras_core->config->nbio_ip_version;
+ nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn;
+ if (!nbio->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version);
+ if (!nbio->ip_func)
+ return -EINVAL;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, true);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true);
+ }
+
+ return 0;
+}
+
+int ras_nbio_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, false);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false);
+ }
+
+ return 0;
+}
+
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->ip_func) {
+ if (nbio->ip_func->handle_ras_controller_intr_no_bifring)
+ nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core);
+ if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring)
+ nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core);
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
new file mode 100644
index 000000000000..0a1313e59a02
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_H__
+#define __RAS_NBIO_H__
+#include "ras.h"
+
+struct ras_core_context;
+
+struct ras_nbio_ip_func {
+ int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core);
+ int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core);
+ uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio {
+ uint32_t nbio_ip_version;
+ const struct ras_nbio_ip_func *ip_func;
+ const struct ras_nbio_sys_func *sys_func;
+};
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core);
+int ras_nbio_hw_fini(struct ras_core_context *ras_core);
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
new file mode 100644
index 000000000000..f17d708ec668
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio_v7_9.h"
+
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+
+/* BIF_BX0_BIF_INTR_CNTL */
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2
+/* BIF_BX_PF0_PARTITION_MEM_STATUS */
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* TODO: handle ras controller interrupt */
+ }
+
+ return 0;
+}
+
+static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+ int ret = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ ret = ras_core_handle_fatal_error(ras_core);
+ }
+
+ return ret;
+}
+
+static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core)
+{
+ uint32_t mem_status;
+ uint32_t mem_mode;
+
+ mem_status =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+
+ /* Each set bit corresponds to one NPS memory partition mode (1-8) */
+ mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
+ return ffs(mem_mode);
+}
+
+const struct ras_nbio_ip_func ras_nbio_v7_9 = {
+ .handle_ras_controller_intr_no_bifring =
+ nbio_v7_9_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+ .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
new file mode 100644
index 000000000000..8711c82a927f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_V7_9_H__
+#define __RAS_NBIO_V7_9_H__
+#include "ras_nbio.h"
+
+extern const struct ras_nbio_ip_func ras_nbio_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
new file mode 100644
index 000000000000..02f0657f78a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_process.h"
+
+#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req))
+
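+/* Polling period (ms) of the RAS processing thread when no interrupt request
+ * is pending.
+ */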
+#define RAS_POLLING_ECC_TIMEOUT 300
+
+static int ras_process_put_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_in_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+ if (!ret) {
+ RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core,
+ uint32_t reset_cause)
+{
+ struct ras_event_req req = {0};
+
+ req.reset = reset_cause;
+
+ return ras_process_put_event(ras_core, &req);
+}
+
+static int ras_process_get_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ return kfifo_out_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+}
+
+static void ras_process_clear_event_fifo(struct ras_core_context *ras_core)
+{
+ struct ras_event_req req;
+ int ret;
+
+ do {
+ ret = ras_process_get_event(ras_core, &req);
+ } while (ret);
+}
+
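+/* Number of 1 ms retries to wait for deferred error data to become visible
+ * after a UMC interrupt.
+ */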
+#define AMDGPU_RAS_WAITING_DATA_READY 200
+static int ras_process_umc_event(struct ras_core_context *ras_core,
+ uint32_t event_count)
+{
+ struct ras_ecc_count ecc_data;
+ int ret = 0;
+ uint32_t timeout = 0;
+ uint32_t detected_de_count = 0;
+
+ do {
+ memset(&ecc_data, 0, sizeof(ecc_data));
+ ret = ras_core_update_ecc_info(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data);
+ if (ret)
+ return ret;
+
+ if (ecc_data.new_de_count) {
+ detected_de_count += ecc_data.new_de_count;
+ timeout = 0;
+ } else {
+ if (!timeout && event_count)
+ timeout = AMDGPU_RAS_WAITING_DATA_READY;
+
+ if (timeout) {
+ if (!--timeout)
+ break;
+
+ msleep(1);
+ }
+ }
+ } while (detected_de_count < event_count);
+
+ if (detected_de_count && ras_core_gpu_is_rma(ras_core))
+ ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA);
+
+ return 0;
+}
+
+static int ras_process_non_umc_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ struct ras_event_req req;
+ uint32_t event_count = kfifo_len(&ras_proc->event_fifo);
+ uint32_t reset_flags = 0;
+ int ret = 0, i;
+
+ for (i = 0; i < event_count; i++) {
+ memset(&req, 0, sizeof(req));
+ ret = ras_process_get_event(ras_core, &req);
+ if (!ret)
+ continue;
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__POISON_CONSUMPTION, &req);
+
+ reset_flags |= req.reset;
+
+ if (req.reset == GPU_RESET_CAUSE_RMA)
+ continue;
+
+ if (req.reset)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} GPU reset for %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ else
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ }
+
+ if (reset_flags) {
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESET_GPU, &reset_flags);
+ if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA))
+ return -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+int ras_process_handle_ras_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ uint32_t umc_event_count;
+ int ret;
+
+ ras_aca_clear_fatal_flag(ras_core);
+ ras_umc_log_pending_bad_bank(ras_core);
+
+ do {
+ umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+ ret = ras_process_umc_event(ras_core, umc_event_count);
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+ break;
+
+ if (umc_event_count)
+ atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+ } while (atomic_read(&ras_proc->umc_interrupt_count));
+
+ if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+ (kfifo_len(&ras_proc->event_fifo)))
+ ret = ras_process_non_umc_event(ras_core);
+
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+ /* Clear poison fifo */
+ ras_process_clear_event_fifo(ras_core);
+ atomic_set(&ras_proc->umc_interrupt_count, 0);
+ }
+
+ return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+ struct ras_process *ras_proc = (struct ras_process *)param;
+
+ return (kthread_should_stop() ||
+ atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+static int ras_process_thread(void *context)
+{
+ struct ras_core_context *ras_core = (struct ras_core_context *)context;
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ while (!kthread_should_stop()) {
+ ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+ thread_wait_condition, ras_proc,
+ msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+ if (kthread_should_stop())
+ break;
+
+ if (!ras_core->is_initialized)
+ continue;
+
+ atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+ if (ras_core_gpu_in_reset(ras_core))
+ continue;
+
+ if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+ ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+ else
+ ras_process_handle_ras_event(ras_core);
+ }
+
+ return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_proc->fifo_spinlock);
+
+ init_waitqueue_head(&ras_proc->ras_process_wq);
+
+ ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+ (void *)ras_core, "ras_process_thread");
+ if (IS_ERR(ras_proc->ras_process_thread)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+ ret = PTR_ERR(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ras_process_fini(ras_core);
+ return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ if (ras_proc->ras_process_thread) {
+ kthread_stop(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ }
+
+ kfifo_free(&ras_proc->event_fifo);
+
+ return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ atomic_inc(&ras_proc->umc_interrupt_count);
+ atomic_inc(&ras_proc->ras_interrupt_req);
+
+ wake_up(&ras_proc->ras_process_wq);
+ return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = ras_process_put_event(ras_core, req);
+ if (!ret) {
+ atomic_inc(&ras_proc->ras_interrupt_req);
+ wake_up(&ras_proc->ras_process_wq);
+ }
+
+ return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc)
+{
+ int ret;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ if (!ras_core->is_initialized)
+ return -EPERM;
+
+ if (is_umc)
+ ret = ras_process_add_umc_interrupt_req(ras_core, req);
+ else
+ ret = ras_process_add_non_umc_interrupt_req(ras_core, req);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
new file mode 100644
index 000000000000..28458b50510e
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PROCESS_H__
+#define __RAS_PROCESS_H__
+
+struct ras_event_req {
+ uint64_t seqno;
+ uint32_t idx_vf;
+ uint32_t block;
+ uint16_t pasid;
+ uint32_t reset;
+ void *pasid_fn;
+ void *data;
+};
+
+struct ras_process {
+ void *dev;
+ void *ras_process_thread;
+ wait_queue_head_t ras_process_wq;
+ atomic_t ras_interrupt_req;
+ atomic_t umc_interrupt_count;
+ struct kfifo event_fifo;
+ spinlock_t fifo_spinlock;
+};
+
+struct ras_core_context;
+int ras_process_init(struct ras_core_context *ras_core);
+int ras_process_fini(struct ras_core_context *ras_core);
+int ras_process_handle_ras_event(struct ras_core_context *ras_core);
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
new file mode 100644
index 000000000000..ccdb42d2dd60
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_ta_if.h"
+#include "ras_psp.h"
+#include "ras_psp_v13_0.h"
+
+/* Position of the instance value in the sub_block_index field of
+ * ta_ras_trigger_error_input; the sub block uses the lower 12 bits.
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+static const struct ras_psp_ip_func *ras_psp_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &ras_psp_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "psp ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct ras_psp_sys_status status = {0};
+ int ret;
+
+ if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) {
+ ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status);
+ if (ret)
+ return ret;
+
+ if (status.initialized) {
+ ta_ctx->preload_ras_ta_enabled = true;
+ ta_ctx->ras_ta_initialized = status.initialized;
+ ta_ctx->session_id = status.session_id;
+ }
+
+ psp_ctx->external_mutex = status.psp_cmd_mutex;
+ }
+
+ return 0;
+}
+
+static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ if (psp->sys_func && psp->sys_func->get_ras_ta_init_param)
+ return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config get_ras_ta_init_param API!!\n");
+ return -EACCES;
+}
+
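+/* Look up the GPU memory block backing the given mem_type and allocate it from
+ * the core on first use; callers must balance each call with
+ * ras_psp_put_gpu_mem().
+ */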
+static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct gpu_mem_block *gpu_mem = NULL;
+ int ret;
+
+ switch (mem_type) {
+ case GPU_MEM_TYPE_RAS_PSP_RING:
+ gpu_mem = &psp->psp_ring.ras_ring_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_CMD:
+ gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_FENCE:
+ gpu_mem = &psp->psp_ctx.out_fence_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_FW:
+ gpu_mem = &psp->ta_ctx.fw_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_CMD:
+ gpu_mem = &psp->ta_ctx.cmd_gpu_mem;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!gpu_mem->ref_count) {
+ ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem);
+ if (ret)
+ return NULL;
+ gpu_mem->mem_type = mem_type;
+ }
+
+ gpu_mem->ref_count++;
+
+ return gpu_mem;
+}
+
+static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core,
+ struct gpu_mem_block *gpu_mem)
+{
+ if (!gpu_mem)
+ return 0;
+
+ gpu_mem->ref_count--;
+
+ if (gpu_mem->ref_count > 0) {
+ return 0;
+ } else if (gpu_mem->ref_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Duplicate free gpu memory %u\n", gpu_mem->mem_type);
+ } else {
+ ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem);
+ memset(gpu_mem, 0, sizeof(*gpu_mem));
+ }
+
+ return 0;
+}
+
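+/* Serialize PSP command submission; use the externally provided mutex when the
+ * surrounding driver has configured one, otherwise fall back to the internal
+ * mutex.
+ */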
+static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_lock(psp_ctx->external_mutex);
+ else
+ mutex_lock(&psp_ctx->internal_mutex);
+}
+
+static void __release_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_unlock(psp_ctx->external_mutex);
+ else
+ mutex_unlock(&psp_ctx->internal_mutex);
+}
+
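+/* Convert the ring write pointer (expressed in dwords) into a frame slot index. */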
+static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ uint32_t ras_ring_wptr_dw;
+
+ ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core);
+
+ return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame));
+}
+
+static int __set_ring_frame_slot(struct ras_core_context *ras_core,
+ uint32_t slot)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ return psp->ip_func->psp_ras_ring_wptr_set(ras_core,
+ (slot * sizeof(struct psp_gfx_rb_frame)) >> 2);
+}
+
+static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core,
+ struct psp_gfx_rb_frame *frame)
+{
+ struct gpu_mem_block *ring_mem;
+ struct psp_gfx_rb_frame *rb_frame;
+ uint32_t max_frame_slot;
+ uint32_t slot_idx;
+ uint32_t write_flush_read_back = 0;
+ int ret = 0;
+
+ ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING);
+ if (!ring_mem)
+ return -ENOMEM;
+
+ max_frame_slot =
+ div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame));
+
+ rb_frame =
+ (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr;
+
+ slot_idx = __get_ring_frame_slot(ras_core);
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ memcpy(&rb_frame[slot_idx], frame, sizeof(*frame));
+
+ /* Read the frame back to force the write to complete before
+ * updating the write pointer.
+ */
+ write_flush_read_back = rb_frame[slot_idx].fence_value;
+ if (write_flush_read_back != frame->fence_value) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to submit ring cmd! cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n",
+ rb_frame[slot_idx].cmd_buf_addr_hi,
+ rb_frame[slot_idx].cmd_buf_addr_lo,
+ rb_frame[slot_idx].fence_addr_hi,
+ rb_frame[slot_idx].fence_addr_lo,
+ write_flush_read_back, frame->fence_value);
+ ret = -EACCES;
+ goto err;
+ }
+
+ slot_idx++;
+
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ __set_ring_frame_slot(ras_core, slot_idx);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, ring_mem);
+ return ret;
+}
+
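+/*
+ * Submit a PSP GFX command: copy the command into the shared command
+ * buffer, queue a ring frame that references the command and fence
+ * buffers, then poll the fence memory (for roughly two seconds) until the
+ * PSP writes back the expected fence value, bailing out early if a RAS
+ * interrupt is detected and a GPU reset is about to take over.
+ */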
+static int send_psp_cmd(struct ras_core_context *ras_core,
+ enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data,
+ uint32_t cmd_size, struct psp_cmd_resp *resp)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct gpu_mem_block *psp_cmd_buf = NULL;
+ struct gpu_mem_block *psp_fence_buf = NULL;
+ struct psp_gfx_cmd_resp *gfx_cmd;
+ struct psp_gfx_rb_frame rb_frame;
+ int ret = 0;
+ int timeout = 1000;
+
+ if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id);
+ return -EINVAL;
+ }
+
+ __acquire_psp_cmd_lock(ras_core);
+
+ psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD);
+ if (!psp_cmd_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE);
+ if (!psp_fence_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr;
+ memset(gfx_cmd, 0, sizeof(*gfx_cmd));
+ gfx_cmd->cmd_id = gfx_cmd_id;
+ memcpy(&gfx_cmd->cmd, cmd_data, cmd_size);
+
+ psp_ctx->in_fence_value++;
+
+ memset(&rb_frame, 0, sizeof(rb_frame));
+ rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_value = psp_ctx->in_fence_value;
+
+ ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame);
+ if (ret) {
+ psp_ctx->in_fence_value--;
+ goto exit;
+ }
+
+ while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) !=
+ psp_ctx->in_fence_value) {
+ if (--timeout == 0)
+ break;
+ /*
+ * Do not keep waiting for the timeout when err_event_athub occurs:
+ * the GPU reset thread has already been triggered, and the lock
+ * resources must be released for the PSP resume sequence.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core))
+ break;
+
+ msleep(2);
+ }
+
+ resp->status = gfx_cmd->resp.status;
+ resp->session_id = gfx_cmd->resp.session_id;
+
+exit:
+ ras_psp_put_gpu_mem(ras_core, psp_cmd_buf);
+ ras_psp_put_gpu_mem(ras_core, psp_fence_buf);
+
+ __release_psp_cmd_lock(ras_core);
+
+ return ret;
+}
+
+static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core,
+ struct ras_ta_cmd *ras_cmd)
+{
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n");
+ ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) {
+ RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n");
+ }
+
+ switch (ras_cmd->ras_status) {
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case RAS_TA_STATUS__SUCCESS:
+ break;
+ case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
+}
+
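+/*
+ * Send a runtime command to the loaded RAS TA: the input is copied into
+ * the shared TA command buffer and wrapped in a GFX_CMD_ID_INVOKE_CMD
+ * against the current TA session, serialized by ta_mutex and with the GPU
+ * reset lock held (acquired with a trylock).
+ */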
+static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size,
+ void *out, uint32_t out_size)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ras_cmd;
+ struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret = 0;
+
+ if (!in || (in_size > sizeof(union ras_ta_cmd_input)) ||
+ (cmd_id >= MAX_RAS_TA_CMD_ID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id);
+ return -EINVAL;
+ }
+
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem)
+ return -ENOMEM;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+
+ mutex_lock(&ta_ctx->ta_mutex);
+
+ memset(ras_cmd, 0, sizeof(*ras_cmd));
+ ras_cmd->cmd_id = cmd_id;
+ memcpy(&ras_cmd->ras_in_message, in, in_size);
+
+ invoke_cmd.ta_cmd_id = cmd_id;
+ invoke_cmd.session_id = ta_ctx->session_id;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD,
+ &invoke_cmd, sizeof(invoke_cmd), &resp);
+
+ /* If err_event_athub occurs, the error injection was successful;
+ * however, the return status from the TA is no longer reliable.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS: Failed to send psp cmd! ret:%d, status:%u\n",
+ ret, resp.status);
+ ret = -ESTRPIPE;
+ goto unlock;
+ }
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!ras_cmd->ras_status && out && out_size)
+ memcpy(out, &ras_cmd->ras_out_message, out_size);
+
+ __check_ras_ta_cmd_resp(ras_core, ras_cmd);
+
+unlock:
+ mutex_unlock(&ta_ctx->ta_mutex);
+ ras_core_up_gpu_reset_lock(ras_core);
+out:
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
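+/*
+ * Build the error injection request: map the requested instance mask to a
+ * per-IP device mask (GC/SDMA/VCN) and pack it into the upper bits of
+ * sub_block_index before sending RAS_TA_CMD_ID__TRIGGER_ERROR.
+ */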
+static int trigger_ras_ta_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ uint32_t dev_mask = 0;
+
+ switch (info->block_id) {
+ case RAS_TA_BLOCK__GFX:
+ if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type,
+ info->sub_block_index, &info->sub_block_index))
+ return -EINVAL;
+
+ dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask);
+ break;
+ case RAS_TA_BLOCK__SDMA:
+ dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask);
+ break;
+ case RAS_TA_BLOCK__VCN:
+ case RAS_TA_BLOCK__JPEG:
+ dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* reuse sub_block_index for backward compatibility */
+ dev_mask <<= RAS_TA_INST_SHIFT;
+ dev_mask &= RAS_TA_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR,
+ info, sizeof(*info), NULL, 0);
+}
+
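+/*
+ * Load the RAS TA: copy the TA binary into shared GPU memory, fill the TA
+ * init flags in the shared command buffer and issue GFX_CMD_ID_LOAD_TA;
+ * on success the returned session id and the TA version from the image
+ * header are recorded in the TA context.
+ */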
+static int send_load_ta_fw_cmd(struct ras_core_context *ras_core,
+ struct ras_ta_ctx *ta_ctx)
+{
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ struct gpu_mem_block *fw_mem;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ta_cmd;
+ struct ras_ta_init_flags *ta_init_flags;
+ struct psp_gfx_cmd_load_ta psp_load_ta_cmd;
+ struct psp_cmd_resp resp = {0};
+ struct ras_ta_image_header *fw_hdr = NULL;
+ int ret;
+
+ fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW);
+ if (!fw_mem)
+ return -ENOMEM;
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param);
+ if (ret)
+ goto err;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ /* copy ras ta binary to shared gpu memory */
+ memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size);
+ fw_mem->mem_size = fw_bin->bin_size;
+
+ /* Initialize ras ta startup parameter */
+ ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+ ta_init_flags = &ta_cmd->ras_in_message.init_flags;
+
+ ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en;
+ ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode;
+ ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask;
+ ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num;
+ ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode;
+ ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask;
+
+ /* Setup load ras ta command */
+ memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd));
+ psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_len = fw_mem->mem_size;
+ psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA,
+ &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp);
+ if (!ret && !resp.status) {
+ /* Read the TA version from the firmware image header (offset 0x60) */
+ fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr;
+ RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n",
+ (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF,
+ (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF);
+ ta_ctx->ta_version = fw_hdr->image_version;
+ ta_ctx->session_id = resp.session_id;
+ ta_ctx->ras_ta_initialized = true;
+ } else {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status);
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, fw_mem);
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int load_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ int ret;
+
+ fw_bin->bin_addr = ras_ta_load->bin_addr;
+ fw_bin->bin_size = ras_ta_load->bin_size;
+ fw_bin->fw_version = ras_ta_load->fw_version;
+ fw_bin->feature_version = ras_ta_load->feature_version;
+
+ ret = send_load_ta_fw_cmd(ras_core, ta_ctx);
+ if (!ret) {
+ ras_ta_load->out_session_id = ta_ctx->session_id;
+ ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version;
+ }
+
+ return ret;
+}
+
+static int unload_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core))
+ return -EACCES;
+
+ cmd_unload_ta.session_id = ta_ctx->session_id;
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA,
+ &cmd_unload_ta, sizeof(cmd_unload_ta), &resp);
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to unload RAS TA! ret:%d, status:%u\n",
+ ret, resp.status);
+ goto unlock;
+ }
+
+ kfree(ta_ctx->fw_bin.bin_addr);
+ memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin));
+ ta_ctx->ta_version = 0;
+ ta_ctx->ras_ta_initialized = false;
+ ta_ctx->session_id = 0;
+
+unlock:
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return ret;
+}
+
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ta_unload ras_ta_unload = {0};
+ int ret;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if (!ras_ta_load)
+ return -EINVAL;
+
+ if (ta_ctx->ras_ta_initialized) {
+ ras_ta_unload.ras_session_id = ta_ctx->session_id;
+ ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload);
+ if (ret)
+ return ret;
+ }
+
+ return load_ras_ta_firmware(ras_core, ras_ta_load);
+}
+
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if ((!ras_ta_unload) ||
+ (ras_ta_unload->ras_session_id != ta_ctx->session_id))
+ return -EINVAL;
+
+ return unload_ras_ta_firmware(ras_core, ras_ta_unload);
+}
+
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!info)
+ return -EINVAL;
+
+ return trigger_ras_ta_error(ras_core, info, instance_mask);
+}
+
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled &&
+ !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!addr_in || !addr_out)
+ return -EINVAL;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS,
+ addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out));
+}
+
+int ras_psp_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ memset(psp, 0, sizeof(*psp));
+
+ psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn;
+ if (!psp->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&psp->psp_ctx.internal_mutex);
+ mutex_init(&psp->ta_ctx.ta_mutex);
+
+ return 0;
+}
+
+int ras_psp_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ mutex_destroy(&psp->psp_ctx.internal_mutex);
+ mutex_destroy(&psp->ta_ctx.ta_mutex);
+
+ memset(psp, 0, sizeof(*psp));
+
+ return 0;
+}
+
+int ras_psp_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ psp->psp_ip_version = ras_core->config->psp_ip_version;
+
+ psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version);
+ if (!psp->ip_func)
+ return -EINVAL;
+
+ /* After a GPU reset the system RAS PSP status may have changed,
+ * so the system status has to be synchronized again.
+ */
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ return 0;
+}
+
+int ras_psp_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ bool ret = false;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized)
+ return false;
+
+ switch (cmd_id) {
+ case RAS_TA_CMD_ID__QUERY_ADDRESS:
+ /* Currently, querying the address from RAS TA is only supported
+ * when the RAS TA firmware is loaded during driver installation.
+ */
+ if (ta_ctx->preload_ras_ta_enabled)
+ ret = true;
+ break;
+ case RAS_TA_CMD_ID__TRIGGER_ERROR:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
new file mode 100644
index 000000000000..71776fecfd66
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PSP_H__
+#define __RAS_PSP_H__
+#include "ras.h"
+#include "ras_ta_if.h"
+
+struct ras_core_context;
+struct ras_ta_trigger_error_input;
+struct ras_ta_query_address_input;
+struct ras_ta_query_address_output;
+enum ras_ta_cmd_id;
+
+struct ras_ta_image_header {
+ uint32_t reserved1[24];
+ uint32_t image_version; /* [0x60] Off Chip Firmware Version */
+ uint32_t reserved2[39];
+};
+
+struct ras_psp_sys_status {
+ bool initialized;
+ uint32_t session_id;
+ void *psp_cmd_mutex;
+};
+
+struct ras_ta_init_param {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct gpu_mem_block {
+ uint32_t mem_type;
+ void *mem_bo;
+ uint64_t mem_mc_addr;
+ void *mem_cpu_addr;
+ uint32_t mem_size;
+ int ref_count;
+ void *private;
+};
+
+struct ras_psp_ip_func {
+ uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core);
+ int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr);
+};
+
+struct ras_psp_ring {
+ struct gpu_mem_block ras_ring_gpu_mem;
+};
+
+struct psp_cmd_resp {
+ uint32_t status;
+ uint32_t session_id;
+};
+
+struct ras_psp_ctx {
+ void *external_mutex;
+ struct mutex internal_mutex;
+ uint64_t in_fence_value;
+ struct gpu_mem_block psp_cmd_gpu_mem;
+ struct gpu_mem_block out_fence_gpu_mem;
+};
+
+struct ras_ta_fw_bin {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+};
+
+struct ras_ta_ctx {
+ bool preload_ras_ta_enabled;
+ bool ras_ta_initialized;
+ uint32_t session_id;
+ uint32_t resp_status;
+ uint32_t ta_version;
+ struct mutex ta_mutex;
+ struct ras_ta_fw_bin fw_bin;
+ struct ras_ta_init_param init_param;
+ struct gpu_mem_block fw_gpu_mem;
+ struct gpu_mem_block cmd_gpu_mem;
+};
+
+struct ras_psp {
+ uint32_t psp_ip_version;
+ struct ras_psp_ring psp_ring;
+ struct ras_psp_ctx psp_ctx;
+ struct ras_ta_ctx ta_ctx;
+ const struct ras_psp_ip_func *ip_func;
+ const struct ras_psp_sys_func *sys_func;
+};
+
+struct ras_psp_ta_load {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+ uint64_t out_session_id;
+ uint32_t out_loaded_ta_version;
+};
+
+struct ras_psp_ta_unload {
+ uint64_t ras_session_id;
+};
+
+int ras_psp_sw_init(struct ras_core_context *ras_core);
+int ras_psp_sw_fini(struct ras_core_context *ras_core);
+int ras_psp_hw_init(struct ras_core_context *ras_core);
+int ras_psp_hw_fini(struct ras_core_context *ras_core);
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load);
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload);
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask);
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out);
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
new file mode 100644
index 000000000000..626cf39b75ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_psp_v13_0.h"
+
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+
+static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core)
+{
+ return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67);
+}
+
+static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value)
+{
+ RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value);
+
+ return 0;
+}
+
+const struct ras_psp_ip_func ras_psp_v13_0 = {
+ .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get,
+ .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
new file mode 100644
index 000000000000..b705ffe38a12
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_PSP_V13_0_H__
+#define __RAS_PSP_V13_0_H__
+#include "ras_psp.h"
+
+extern const struct ras_psp_ip_func ras_psp_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
new file mode 100644
index 000000000000..0921e36d3274
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _RAS_TA_IF_H
+#define _RAS_TA_IF_H
+#include "ras.h"
+
+#define RAS_TA_HOST_IF_VER 0
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+/* invalid node instance value */
+#define RAS_TA_INV_NODE 0xffff
+
+/* RAS related enumerations */
+/**********************************************************/
+enum ras_ta_cmd_id {
+ RAS_TA_CMD_ID__ENABLE_FEATURES = 0,
+ RAS_TA_CMD_ID__DISABLE_FEATURES,
+ RAS_TA_CMD_ID__TRIGGER_ERROR,
+ RAS_TA_CMD_ID__QUERY_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_ADDRESS,
+ MAX_RAS_TA_CMD_ID
+};
+
+enum ras_ta_status {
+ RAS_TA_STATUS__SUCCESS = 0x0000,
+ RAS_TA_STATUS__RESET_NEEDED = 0xA001,
+ RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008,
+ RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009,
+ RAS_TA_STATUS__ERROR_GENERIC = 0xA00A,
+ RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019,
+ RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A
+};
+
+enum ras_ta_block {
+ RAS_TA_BLOCK__UMC = 0,
+ RAS_TA_BLOCK__SDMA,
+ RAS_TA_BLOCK__GFX,
+ RAS_TA_BLOCK__MMHUB,
+ RAS_TA_BLOCK__ATHUB,
+ RAS_TA_BLOCK__PCIE_BIF,
+ RAS_TA_BLOCK__HDP,
+ RAS_TA_BLOCK__XGMI_WAFL,
+ RAS_TA_BLOCK__DF,
+ RAS_TA_BLOCK__SMN,
+ RAS_TA_BLOCK__SEM,
+ RAS_TA_BLOCK__MP0,
+ RAS_TA_BLOCK__MP1,
+ RAS_TA_BLOCK__FUSE,
+ RAS_TA_BLOCK__MCA,
+ RAS_TA_BLOCK__VCN,
+ RAS_TA_BLOCK__JPEG,
+ RAS_TA_BLOCK__IH,
+ RAS_TA_BLOCK__MPIO,
+ RAS_TA_BLOCK__MMSCH,
+ RAS_TA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_mca_block {
+ RAS_TA_MCA_BLOCK__MP0 = 0,
+ RAS_TA_MCA_BLOCK__MP1 = 1,
+ RAS_TA_MCA_BLOCK__MPIO = 2,
+ RAS_TA_MCA_BLOCK__IOHC = 3,
+ RAS_TA_MCA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_error_type {
+ RAS_TA_ERROR__NONE = 0,
+ RAS_TA_ERROR__PARITY = 1,
+ RAS_TA_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TA_ERROR__POISON = 8,
+};
+
+enum ras_ta_address_type {
+ RAS_TA_MCA_TO_PA,
+ RAS_TA_PA_TO_MCA,
+};
+
+enum ras_ta_nps_mode {
+ RAS_TA_UNKNOWN_MODE = 0,
+ RAS_TA_NPS1_MODE = 1,
+ RAS_TA_NPS2_MODE = 2,
+ RAS_TA_NPS4_MODE = 4,
+ RAS_TA_NPS8_MODE = 8,
+};
+
+/* Input/output structures for RAS commands */
+/**********************************************************/
+
+struct ras_ta_enable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_disable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_trigger_error_input {
+ /* RAS block, e.g. UMC, GFX */
+ enum ras_ta_block block_id;
+
+ /* type of error, e.g. single_correctable */
+ enum ras_ta_error_type inject_error_type;
+
+ /* memory block, e.g. HBM, SRAM, etc. */
+ uint32_t sub_block_index;
+
+ /* explicit address of error */
+ uint64_t address;
+
+ /* method of error injection, e.g. persistent, coherent, etc. */
+ uint64_t value;
+};
+
+struct ras_ta_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct ras_ta_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct ras_ta_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ras_ta_query_address_input {
+ enum ras_ta_address_type addr_type;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+struct ras_ta_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
+struct ras_ta_query_address_output {
+ /* don't use the flags here */
+ struct ras_ta_output_flags flags;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+/* Common input structure for RAS callbacks */
+/**********************************************************/
+union ras_ta_cmd_input {
+ struct ras_ta_init_flags init_flags;
+ struct ras_ta_enable_features_input enable_features;
+ struct ras_ta_disable_features_input disable_features;
+ struct ras_ta_trigger_error_input trigger_error;
+ struct ras_ta_query_address_input address;
+ uint32_t reserve_pad[256];
+};
+
+union ras_ta_cmd_output {
+ struct ras_ta_output_flags flags;
+ struct ras_ta_query_address_output address;
+ uint32_t reserve_pad[256];
+};
+
+struct ras_ta_cmd {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ras_ta_cmd_input ras_in_message;
+ union ras_ta_cmd_output ras_out_message;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
new file mode 100644
index 000000000000..4067359bb299
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
@@ -0,0 +1,706 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_umc_v12_0.h"
+
+#define MAX_ECC_NUM_PER_RETIREMENT 16
+
+/* bad page timestamp format
+ * yy[31:27] mm[26:23] day[22:17] hh[16:12] mm[11:6] ss[5:0]
+ */
+#define EEPROM_TIMESTAMP_MINUTE 6
+#define EEPROM_TIMESTAMP_HOUR 12
+#define EEPROM_TIMESTAMP_DAY 17
+#define EEPROM_TIMESTAMP_MONTH 23
+#define EEPROM_TIMESTAMP_YEAR 27
+
+static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core)
+{
+ struct ras_time tm = {0};
+ uint64_t utc_timestamp = 0;
+ uint64_t eeprom_timestamp = 0;
+
+ utc_timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ if (!utc_timestamp)
+ return utc_timestamp;
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm);
+
+ /* the representable year range is 2000 ~ 2031; clamp the year if it is outside */
+ if (tm.tm_year < 2000)
+ tm.tm_year = 2000;
+ if (tm.tm_year > 2031)
+ tm.tm_year = 2031;
+
+ tm.tm_year -= 2000;
+
+ eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE)
+ + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR)
+ + (tm.tm_mday << EEPROM_TIMESTAMP_DAY)
+ + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH)
+ + (tm.tm_year << EEPROM_TIMESTAMP_YEAR);
+ eeprom_timestamp &= 0xffffffff;
+
+ return eeprom_timestamp;
+}
+
+static const struct ras_umc_ip_func *ras_umc_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(12, 0, 0):
+ return &ras_umc_func_v12_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "UMC ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps)
+{
+ struct ras_ta_query_address_input addr_in;
+ struct ras_ta_query_address_output addr_out;
+ int ret;
+
+ if (!in)
+ return -EINVAL;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.ma.err_addr = in->err_addr;
+ addr_in.ma.ch_inst = in->ch_inst;
+ addr_in.ma.umc_inst = in->umc_inst;
+ addr_in.ma.node_inst = in->node_inst;
+ addr_in.ma.socket_id = in->socket_id;
+
+ addr_in.addr_type = RAS_TA_MCA_TO_PA;
+
+ ret = ras_psp_query_address(ras_core, &addr_in, &addr_out);
+ if (ret) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to query RAS physical address for 0x%llx, ret:%d",
+ in->err_addr, ret);
+ return -EREMOTEIO;
+ }
+
+ if (out) {
+ out->pa = addr_out.pa.pa;
+ out->bank = addr_out.pa.bank;
+ out->channel_idx = addr_out.pa.channel_idx;
+ }
+
+ return 0;
+}
+
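+/*
+ * Logged ECC records are kept in a radix tree keyed by the retired row
+ * PFN; freshly inserted entries are tagged with UMC_ECC_NEW_DETECTED_TAG
+ * until they are picked up for page retirement.
+ */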
+static int ras_umc_log_ecc(struct ras_core_context *ras_core,
+ unsigned long idx, void *data)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret;
+
+ mutex_lock(&ras_umc->tree_lock);
+ ret = radix_tree_insert(&ras_umc->root, idx, data);
+ if (!ret)
+ radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return ret;
+}
+
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t buf[8] = {0};
+ void **slot;
+ void *data;
+ void *iter = buf;
+
+ mutex_lock(&ras_umc->tree_lock);
+ radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) {
+ data = ras_radix_tree_delete_iter(&ras_umc->root, iter);
+ kfree(data);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return 0;
+}
+
+static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t page_pfn[16];
+ int count = 0, i;
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) {
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to convert error address! count:%d\n", count);
+ return;
+ }
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]);
+}
+
+/* While a GPU reset is in progress, ECC logging operations are queued
+ * as pending and handled later.
+ */
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node;
+
+ ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL);
+ if (!ecc_node)
+ return -ENOMEM;
+
+ memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc));
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list);
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+/* After the GPU reset completes, re-log the pending error banks.
+ */
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record umc_rec;
+ struct eeprom_umc_record *err_rec;
+ int ret;
+
+ memset(&umc_rec, 0, sizeof(umc_rec));
+
+ mutex_lock(&ras_umc->bank_log_lock);
+ ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec);
+ if (ret)
+ goto out;
+
+ err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL);
+ if (!err_rec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(err_rec, &umc_rec, sizeof(umc_rec));
+ ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec);
+ if (ret) {
+ if (ret == -EEXIST) {
+ RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n");
+ ret = 0;
+ }
+
+ kfree(err_rec);
+ goto out;
+ }
+
+ ras_umc_reserve_eeprom_record(ras_core, err_rec);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL);
+
+out:
+ mutex_unlock(&ras_umc->bank_log_lock);
+ return ret;
+}
+
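+/*
+ * Collect up to MAX_ECC_NUM_PER_RETIREMENT records that are still tagged
+ * as newly detected, copy them into the caller's array and clear the tag
+ * so they are not returned again.
+ */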
+static int ras_umc_get_new_records(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, u32 num)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? num : MAX_ECC_NUM_PER_RETIREMENT;
+ int count = 0;
+ int new_detected, i;
+
+ mutex_lock(&ras_umc->tree_lock);
+ new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries,
+ 0, entry_num, UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record));
+ count++;
+ radix_tree_tag_clear(&ras_umc->root,
+ entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return count;
+}
+
+static bool ras_umc_check_retired_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t nps = 0;
+ int i, ret;
+
+ if (from_eeprom) {
+ nps = ras_umc->umc_err_data.umc_nps_mode;
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) {
+ ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps);
+ if (ret)
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to adjust eeprom record, ret:%d", ret);
+ }
+ return false;
+ }
+
+ for (i = 0; i < data->count; i++) {
+ if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) &&
+ (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn))
+ return true;
+ }
+
+ return false;
+}
+
+/* alloc/realloc bps array */
+static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core,
+ struct eeprom_store_record *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kzalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+
+ if (!bps)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(bps, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = bps;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ return 0;
+}
+
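+/*
+ * Add a record to the RAM copy of the bad page list: expand it into the
+ * per-page PFNs for the current NPS mode (when the IP callback is
+ * available) and store one entry per page, otherwise store the record
+ * as-is.
+ */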
+static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn[16];
+ int count = 0, j;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages)
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+
+ if (count > 0) {
+ for (j = 0; j < count; j++) {
+ bps->cur_nps_retired_row_pfn = page_pfn[j];
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+ } else {
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+/* It deals with VRAM only. */
+static int ras_umc_add_bad_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps,
+ int pages, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *data = &ras_umc->umc_err_data;
+ int i, ret = 0;
+
+ if (!bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < pages; i++) {
+ if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom))
+ continue;
+
+ ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+
+ if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn)
+ continue;
+
+ data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn;
+
+ if (from_eeprom)
+ ras_umc_reserve_eeprom_record(ras_core, &bps[i]);
+
+ ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+ }
+out:
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+/*
+ * Read the error record array from EEPROM and reserve enough space for
+ * storing new bad pages.
+ */
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core)
+{
+ struct eeprom_umc_record *bps;
+ uint32_t ras_num_recs;
+ int ret;
+
+ ras_num_recs = ras_eeprom_get_record_count(ras_core);
+ /* no bad page record, skip eeprom access */
+ if (!ras_num_recs ||
+ ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE)
+ return 0;
+
+ bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ ret = ras_eeprom_read(ras_core, bps, ras_num_recs);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!");
+ } else {
+ ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN;
+ ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true);
+ }
+
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * Write the error record array to EEPROM; the function should be
+ * protected by the recovery lock.
+ */
+static int ras_umc_save_bad_pages(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t eeprom_record_num;
+ int save_count;
+ int ret = 0;
+
+ if (!data->bps)
+ return 0;
+
+ eeprom_record_num = ras_eeprom_get_record_count(ras_core);
+ mutex_lock(&ras_umc->umc_lock);
+ save_count = data->count - eeprom_record_num;
+ /* only new entries are saved */
+ if (save_count > 0) {
+ if (ras_eeprom_append(ras_core,
+ &data->bps[eeprom_record_num],
+ save_count)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!");
+ ret = -EIO;
+ goto exit;
+ }
+
+ RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
+exit:
+ mutex_unlock(&ras_umc->umc_lock);
+ return ret;
+}
+
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data)
+{
+ struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT];
+ int count, ret;
+
+ memset(records, 0, sizeof(records));
+ count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records));
+ if (count <= 0)
+ return -ENODATA;
+
+ ret = ras_umc_add_bad_pages(ras_core, records, count, false);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n");
+ return -EINVAL;
+ }
+
+ ret = ras_umc_save_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ras_umc_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+
+ memset(ras_umc, 0, sizeof(*ras_umc));
+
+ INIT_LIST_HEAD(&ras_umc->pending_ecc_list);
+
+ INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL);
+
+ mutex_init(&ras_umc->tree_lock);
+ mutex_init(&ras_umc->pending_ecc_lock);
+ mutex_init(&ras_umc->umc_lock);
+ mutex_init(&ras_umc->bank_log_lock);
+
+ return 0;
+}
+
+int ras_umc_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_destroy(&ras_umc->umc_lock);
+ mutex_destroy(&ras_umc->bank_log_lock);
+
+ if (umc_err_data->rom_data.bps) {
+ umc_err_data->rom_data.count = 0;
+ kfree(umc_err_data->rom_data.bps);
+ umc_err_data->rom_data.bps = NULL;
+ umc_err_data->rom_data.space_left = 0;
+ }
+
+ if (umc_err_data->ram_data.bps) {
+ umc_err_data->ram_data.count = 0;
+ kfree(umc_err_data->ram_data.bps);
+ umc_err_data->ram_data.bps = NULL;
+ umc_err_data->ram_data.space_left = 0;
+ }
+
+ ras_umc_clear_logged_ecc(ras_core);
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ mutex_destroy(&ras_umc->tree_lock);
+ mutex_destroy(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint32_t nps;
+
+ nps = ras_core_get_curr_nps_mode(ras_core);
+
+ if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_err_data.umc_nps_mode = nps;
+
+ ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type;
+ if (!ras_umc->umc_vram_type) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n",
+ ras_umc->umc_vram_type);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_ip_version = ras_core->config->umc_ip_version;
+ ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version);
+ if (!ras_umc->ip_func)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ras_umc_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data;
+
+ mutex_lock(&ras_core->ras_umc.umc_lock);
+
+ kfree(data->rom_data.bps);
+ kfree(data->ram_data.bps);
+
+ memset(data, 0, sizeof(*data));
+ mutex_unlock(&ras_core->ras_umc.umc_lock);
+
+ return 0;
+}
+
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record)
+{
+ struct eeprom_umc_record *err_rec = record;
+
+ /* Set bad page pfn and nps mode */
+ EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec,
+ RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps);
+
+ err_rec->address = err_addr;
+ err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core);
+ err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = cur_nps_addr->channel_idx;
+ err_rec->mcumc_id = umc_inst;
+ err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa);
+ err_rec->cur_nps_bank = cur_nps_addr->bank;
+ err_rec->cur_nps = cur_nps;
+ return 0;
+}
+
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data;
+
+ return err_data->rom_data.count;
+}
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ return data->count;
+}
+
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ if (index >= data->count)
+ return -EINVAL;
+
+ memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record));
+ return 0;
+}
+
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn = RAS_ADDR_TO_PFN(addr);
+ int i, ret = false;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < data->count; i++) {
+ if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret = 0;
+
+ if (bank_to_pa)
+ ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa);
+ else
+ ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
new file mode 100644
index 000000000000..7d9e779d8c4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_UMC_H__
+#define __RAS_UMC_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_cmd.h"
+
+#define UMC_VRAM_TYPE_UNKNOWN 0
+#define UMC_VRAM_TYPE_GDDR1 1
+#define UMC_VRAM_TYPE_DDR2 2
+#define UMC_VRAM_TYPE_GDDR3 3
+#define UMC_VRAM_TYPE_GDDR4 4
+#define UMC_VRAM_TYPE_GDDR5 5
+#define UMC_VRAM_TYPE_HBM 6
+#define UMC_VRAM_TYPE_DDR3 7
+#define UMC_VRAM_TYPE_DDR4 8
+#define UMC_VRAM_TYPE_GDDR6 9
+#define UMC_VRAM_TYPE_DDR5 10
+#define UMC_VRAM_TYPE_LPDDR4 11
+#define UMC_VRAM_TYPE_LPDDR5 12
+#define UMC_VRAM_TYPE_HBM3E 13
+
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
+#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF)
+
+/* three column bits and one row bit of the MCA address are flipped
+ * during bad page retirement
+ */
+#define UMC_PA_FLIP_BITS_NUM 4
+
+enum umc_memory_partition_mode {
+ UMC_MEMORY_PARTITION_MODE_NONE = 0,
+ UMC_MEMORY_PARTITION_MODE_NPS1 = 1,
+ UMC_MEMORY_PARTITION_MODE_NPS2 = 2,
+ UMC_MEMORY_PARTITION_MODE_NPS3 = 3,
+ UMC_MEMORY_PARTITION_MODE_NPS4 = 4,
+ UMC_MEMORY_PARTITION_MODE_NPS6 = 6,
+ UMC_MEMORY_PARTITION_MODE_NPS8 = 8,
+ UMC_MEMORY_PARTITION_MODE_UNKNOWN
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+
+struct umc_flip_bits {
+ uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
+struct umc_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct umc_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct umc_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+};
+
+struct ras_umc_ip_func {
+ int (*bank_to_eeprom_record)(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record);
+ int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps);
+ int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num);
+ int (*bank_to_soc_pa)(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr, uint64_t *soc_pa);
+ int (*soc_pa_to_bank)(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct umc_bank_addr *bank_addr);
+};
+
+struct eeprom_store_record {
+ /* points to the data records array */
+ struct eeprom_umc_record *bps;
+ /* the count of entries */
+ int count;
+ /* the space left for new entries */
+ int space_left;
+};
+
+struct ras_umc_err_data {
+ struct eeprom_store_record rom_data;
+ struct eeprom_store_record ram_data;
+ enum umc_memory_partition_mode umc_nps_mode;
+ uint64_t last_retired_pfn;
+};
+
+struct ras_umc {
+ u32 umc_ip_version;
+ u32 umc_vram_type;
+ const struct ras_umc_ip_func *ip_func;
+ struct radix_tree_root root;
+ struct mutex tree_lock;
+ struct mutex umc_lock;
+ struct mutex bank_log_lock;
+ struct mutex pending_ecc_lock;
+ struct ras_umc_err_data umc_err_data;
+ struct list_head pending_ecc_list;
+};
+
+int ras_umc_sw_init(struct ras_core_context *ras);
+int ras_umc_sw_fini(struct ras_core_context *ras);
+int ras_umc_hw_init(struct ras_core_context *ras);
+int ras_umc_hw_fini(struct ras_core_context *ras);
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps);
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data);
+int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank);
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank);
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core);
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core);
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core);
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core);
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core);
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record);
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr);
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
new file mode 100644
index 000000000000..5d9a11c17a86
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_core_status.h"
+#include "ras_umc_v12_0.h"
+
+#define NumDieInterleaved 4
+
+static const uint32_t umc_v12_0_channel_idx_tbl[]
+ [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
+ {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
+ {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
+ {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
+ {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
+ {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
+ {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
+ {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
+ {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
+};
+
+/* mapping of MCA error address to normalized address */
+static const uint32_t umc_v12_0_ma2na_mapping[] = {
+ 0, 5, 6, 8, 9, 14, 12, 13,
+ 10, 11, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 24, 7, 29, 30,
+};
+
+static bool umc_v12_0_bit_wise_xor(uint32_t val)
+{
+ bool result = 0;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ result = result ^ ((val >> i) & 0x1);
+
+ return result;
+}
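
The helper above folds a 32-bit value down to its parity (true when an odd number of bits are set); the bank hash below XORs that parity into the bank bits taken from the MCA address. A minimal standalone sketch of the same computation, outside the patch, using two of the XOR masks defined in the header as sample inputs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Parity of a 32-bit value: true if an odd number of bits are set. */
static bool bit_wise_xor(uint32_t val)
{
    bool result = false;
    int i;

    for (i = 0; i < 32; i++)
        result ^= (val >> i) & 0x1;

    return result;
}

int main(void)
{
    /* 0x11111 (the ROW_XOR0 mask) has 5 bits set -> parity 1 */
    printf("%d\n", bit_wise_xor(0x11111));
    /* 0x8888 (the ROW_XOR3 mask) has 4 bits set -> parity 0 */
    printf("%d\n", bit_wise_xor(0x8888));
    return 0;
}
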
+
+static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core,
+ enum umc_memory_partition_mode nps,
+ struct umc_flip_bits *flip_bits)
+{
+ uint32_t vram_type = ras_core->ras_umc.umc_vram_type;
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case UMC_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case UMC_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+}
+
+static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core,
+ uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok)
+{
+ struct umc_flip_bits flip_bits = {0};
+ uint64_t row_pa;
+ int i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = pa;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < flip_bits.bit_num; i++)
+ row_pa &= ~BIT_ULL(flip_bits.flip_bits_in_pa[i]);
+
+ if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa))
+ row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]);
+
+ return row_pa;
+}
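
Clearing the flip bits collapses every address in a retire unit onto one canonical row address; lookup_bad_pages_in_a_row() below then re-expands that row by iterating over all flip-bit combinations. A small standalone sketch of the clearing step, assuming the default NPS1 bit positions from the header (C2=15, C3=16, C4=21, R13=35); the input address is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* assumed NPS1 flip-bit positions, taken from the header below */
    const int flip_bits[4] = { 15, 16, 21, 35 };
    uint64_t pa = 0x123456789ULL;       /* arbitrary soc physical address */
    uint64_t row_pa = pa;
    int i;

    /* clear the looped bits to get the canonical row address */
    for (i = 0; i < 4; i++)
        row_pa &= ~(1ULL << flip_bits[i]);

    printf("pa     = 0x%llx\n", (unsigned long long)pa);
    printf("row_pa = 0x%llx\n", (unsigned long long)row_pa);
    return 0;
}
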
+
+static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num,
+ uint64_t seq_no, bool dump)
+{
+ uint32_t col, col_lower, row, row_lower, idx, row_high;
+ uint64_t soc_pa, row_pa, column, err_addr;
+ uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn);
+ struct umc_flip_bits flip_bits = {0};
+ uint32_t retire_unit;
+ uint32_t i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true);
+
+ err_addr = record->address;
+ /* get column bit 0 and 1 in mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(flip_bits.flip_row_bit);
+
+ if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) {
+ row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL;
+ /* each channel covers 2.25GB; in the MCA address to PA
+ * conversion, [R14 R13] are modified when their value is 0x3,
+ * so take them from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
+
+ idx = 0;
+ row = 0;
+ retire_unit = 0x1 << flip_bits.bit_num;
+ /* loop for all possibilities of retire bits */
+ for (column = 0; column < retire_unit; column++) {
+ soc_pa = row_pa;
+ for (i = 0; i < flip_bits.bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+
+ /* add row bit 13 */
+ if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM)
+ row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower;
+
+ if (dump)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ seq_no, soc_pa, row, col,
+ record->cur_nps_bank, record->mem_channel);
+
+ if (pfns && (idx < num))
+ pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa);
+ }
+
+ return idx;
+}
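
With four flip bits the retire unit is 2^4 = 16, so a single bad row expands to 16 candidate pages, which is where UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (8 * 2) in the header comes from. A toy sketch of the expansion loop, reusing the NPS1 bit positions from the header and assuming RAS_ADDR_TO_PFN is a plain 4KB page shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* NPS1 flip-bit positions from the header, used here for illustration */
    const int flip_bits[4] = { 15, 16, 21, 35 };
    const int bit_num = 4;
    uint64_t row_pa = 0x900000000ULL;   /* canonical row address, arbitrary */
    uint64_t column;

    /* loop over all 2^bit_num combinations of the flip bits */
    for (column = 0; column < (1ULL << bit_num); column++) {
        uint64_t soc_pa = row_pa;
        int i;

        for (i = 0; i < bit_num; i++)
            soc_pa |= ((column >> i) & 0x1ULL) << flip_bits[i];

        /* assumed 4KB page shift standing in for RAS_ADDR_TO_PFN() */
        printf("page %2llu: pfn 0x%llx\n",
               (unsigned long long)column,
               (unsigned long long)(soc_pa >> 12));
    }
    return 0;
}
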
+
+static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ uint32_t i, na_shift;
+ uint64_t soc_pa, na, na_nps;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
+ uint32_t bank0, bank1, bank2, bank3, bank;
+ uint32_t ch_inst = addr_in->ch_inst;
+ uint32_t umc_inst = addr_in->umc_inst;
+ uint32_t node_inst = addr_in->node_inst;
+ uint32_t socket_id = addr_in->socket_id;
+ uint32_t channel_index;
+ uint64_t err_addr = addr_in->err_addr;
+
+ if (node_inst != UMC_INV_AID_NODE) {
+ if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM ||
+ umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM ||
+ node_inst >= UMC_V12_0_AID_NUM_MAX ||
+ socket_id >= UMC_V12_0_SOCKET_NUM_MAX)
+ return -EINVAL;
+ } else {
+ if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX ||
+ ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM)
+ return -EINVAL;
+ }
+
+ bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
+ bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
+ bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
+ bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+
+ /* apply bank hash algorithm */
+ bank0 =
+ bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
+ bank1 =
+ bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
+ bank2 =
+ bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
+ bank3 =
+ bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
+
+ bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
+ err_addr &= ~0x3c0ULL;
+ err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
+
+ na_nps = 0x0;
+ /* convert mca error address to normalized address */
+ for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
+ na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS1)
+ na_shift = 8;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ na_shift = 9;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ na_shift = 10;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8)
+ na_shift = 11;
+ else
+ return -EINVAL;
+
+ na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff);
+
+ if (node_inst != UMC_INV_AID_NODE)
+ channel_index =
+ umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst];
+ else {
+ channel_index = ch_inst;
+ node_inst = channel_index /
+ (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM);
+ }
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* calc channel hash based on absolute address */
+ soc_pa += socket_id * SOCKET_LFB_SIZE;
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ /* restore pa */
+ soc_pa -= socket_id * SOCKET_LFB_SIZE;
+
+ /* get some channel bits from na_nps directly and
+ * add nps section offset
+ */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT);
+ soc_pa |= ((na_nps & 0x100) << 5);
+ soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x300) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) {
+ soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x700) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) +
+ (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3);
+ }
+
+ addr_out->pa = soc_pa;
+ addr_out->bank = bank;
+ addr_out->channel_idx = channel_index;
+
+ return 0;
+}
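
The na_shift step strips the NPS partition-select bits out of the per-partition normalized address: the low 8 bits (the 256B offset) are kept and everything above the shift point is moved down to start at bit 8. A tiny standalone sketch of just that step, with an arbitrary na_nps value:

#include <stdint.h>
#include <stdio.h>

static uint64_t strip_nps_bits(uint64_t na_nps, unsigned int na_shift)
{
    /* keep the 256B offset, drop the partition-select bits above it */
    return ((na_nps >> na_shift) << 8) | (na_nps & 0xff);
}

int main(void)
{
    uint64_t na_nps = 0x12345;  /* arbitrary per-partition normalized address */

    printf("NPS1: 0x%llx\n", (unsigned long long)strip_nps_bits(na_nps, 8));
    printf("NPS2: 0x%llx\n", (unsigned long long)strip_nps_bits(na_nps, 9));
    printf("NPS4: 0x%llx\n", (unsigned long long)strip_nps_bits(na_nps, 10));
    return 0;
}
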
+
+static int convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ int ret;
+
+ if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS))
+ ret = ras_umc_psp_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+ else
+ ret = umc_v12_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+
+ return ret;
+}
+
+static int convert_bank_to_nps_addr(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps)
+{
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr);
+ addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid);
+ addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid);
+ addr_in.node_inst = ACA_IPID_2_DIE_ID(bank->ipid);
+ addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid);
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (!ret) {
+ pa_addr->pa =
+ convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+ pa_addr->channel_idx = addr_out.channel_idx;
+ pa_addr->bank = addr_out.bank;
+ }
+
+ return ret;
+}
+
+static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record)
+{
+ struct umc_phy_addr nps_addr;
+ int ret;
+
+ memset(&nps_addr, 0, sizeof(nps_addr));
+
+ ret = convert_bank_to_nps_addr(ras_core, bank,
+ &nps_addr, bank->nps);
+ if (ret)
+ return ret;
+
+ ras_umc_fill_eeprom_record(ras_core,
+ ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid),
+ &nps_addr, bank->nps, record);
+
+ lookup_bad_pages_in_a_row(ras_core, record,
+ bank->nps, NULL, 0, bank->seq_no, true);
+
+ return 0;
+}
+
+static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps)
+{
+ struct device_system_info dev_info = {0};
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ addr_in.err_addr = record->address;
+ addr_in.ch_inst = record->mem_channel;
+ addr_in.umc_inst = record->mcumc_id;
+ addr_in.node_inst = UMC_INV_AID_NODE;
+ addr_in.socket_id = dev_info.socket_id;
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (ret)
+ return ret;
+
+ *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+
+ return 0;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps)
+{
+ uint64_t pa = 0;
+ int ret = 0;
+
+ if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) {
+ record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record);
+ } else {
+ ret = convert_eeprom_record_to_nps_addr(ras_core,
+ record, &pa, nps);
+ if (!ret)
+ record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa);
+ }
+
+ record->cur_nps = nps;
+
+ return ret;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num)
+{
+ return lookup_bad_pages_in_a_row(ras_core,
+ record, nps, pfns, num, 0, false);
+}
+
+static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa,
+ struct umc_bank_addr *bank_addr)
+{
+ int channel_hashed = 0;
+ int channel_real = 0;
+ int channel_reversed = 0;
+ int i = 0;
+
+ bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa);
+ bank_addr->bank_group = 0; /* This is a combination of SID & Bank. Needed?? */
+ bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa);
+ bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa);
+ bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa);
+
+ /* Channel bits 4-6 are hashed. Brute-force reverse the hash */
+ channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7;
+
+ for (i = 0; i < 8; i++) {
+ channel_reversed = 0;
+ channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2);
+ if (channel_reversed == channel_hashed)
+ channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf);
+ }
+
+ bank_addr->channel = channel_real;
+ bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa);
+
+ return 0;
+}
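
Because the channel hash XORs the original channel bits with address bits, it cannot be undone directly; the function above therefore tries all eight candidates for CH4..CH6 and keeps the one that hashes back to the observed value. A generic standalone sketch of that brute-force inversion pattern, using a stand-in hash rather than the real UMC one:

#include <stdint.h>
#include <stdio.h>

/* stand-in 3-bit hash mixing candidate channel bits with address bits */
static unsigned int toy_hash(unsigned int ch_hi, uint64_t pa)
{
    return (unsigned int)((ch_hi ^ (pa >> 20) ^ (pa >> 27)) & 0x7);
}

int main(void)
{
    uint64_t pa = 0x123456789ULL;          /* arbitrary physical address */
    unsigned int real_ch_hi = 5;           /* the bits we want to recover */
    unsigned int hashed = toy_hash(real_ch_hi, pa);
    unsigned int i;

    /* try every candidate and keep the one whose hash matches */
    for (i = 0; i < 8; i++) {
        if (toy_hash(i, pa) == hashed)
            printf("recovered channel bits: %u\n", i);
    }
    return 0;
}
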
+
+static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr,
+ uint64_t *soc_pa)
+{
+ uint64_t na = 0;
+ uint64_t tmp_pa = 0;
+ *soc_pa = 0;
+
+ tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id);
+ tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank);
+ tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row);
+ tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column);
+ tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel);
+ tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel);
+
+ /* Get the NA */
+ na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT);
+ na |= tmp_pa & 0xff;
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ tmp_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(bank_addr.channel) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa);
+
+ *soc_pa = tmp_pa;
+
+ return 0;
+}
+
+const struct ras_umc_ip_func ras_umc_func_v12_0 = {
+ .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record,
+ .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record,
+ .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages,
+ .bank_to_soc_pa = umc_12_0_bank_to_soc_pa,
+ .soc_pa_to_bank = umc_12_0_soc_pa_to_bank,
+};
+
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
new file mode 100644
index 000000000000..8a35ad856165
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_UMC_V12_0_H__
+#define __RAS_UMC_V12_0_H__
+#include "ras.h"
+
+/* MCA_UMC_UMC0_MCUMC_ADDRT0 */
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L
+
+/* MCMP1_IPIDT0 */
+#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0
+#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20
+#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c
+#define MCMP1_IPIDT0__McaType__SHIFT 0x30
+
+#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL
+#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L
+#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L
+#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L
+
+/* number of umc channel instances with memory mapped register access */
+#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8
+/* number of umc instances with memory mapped register access */
+#define UMC_V12_0_UMC_INSTANCE_NUM 4
+
+/* one normalized address is mapped to 8 physical addresses */
+#define UMC_V12_0_NA_MAP_PA_NUM 8
+
+/* bank bits in MCA error address */
+#define UMC_V12_0_MCA_B0_BIT 6
+#define UMC_V12_0_MCA_B1_BIT 7
+#define UMC_V12_0_MCA_B2_BIT 8
+#define UMC_V12_0_MCA_B3_BIT 9
+
+/* row bits in MCA address */
+#define UMC_V12_0_MCA_R0_BIT 10
+
+/* Stack ID bits in SOC physical address */
+#define UMC_V12_0_PA_SID1_BIT 37
+#define UMC_V12_0_PA_SID0_BIT 36
+
+/* bank bits in SOC physical address */
+#define UMC_V12_0_PA_B3_BIT 18
+#define UMC_V12_0_PA_B2_BIT 17
+#define UMC_V12_0_PA_B1_BIT 20
+#define UMC_V12_0_PA_B0_BIT 19
+
+/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R13_BIT 35
+#define UMC_V12_0_PA_R12_BIT 34
+#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R10_BIT 32
+#define UMC_V12_0_PA_R9_BIT 31
+#define UMC_V12_0_PA_R8_BIT 30
+#define UMC_V12_0_PA_R7_BIT 29
+#define UMC_V12_0_PA_R6_BIT 28
+#define UMC_V12_0_PA_R5_BIT 27
+#define UMC_V12_0_PA_R4_BIT 26
+#define UMC_V12_0_PA_R3_BIT 25
+#define UMC_V12_0_PA_R2_BIT 24
+#define UMC_V12_0_PA_R1_BIT 23
+#define UMC_V12_0_PA_R0_BIT 22
+
+/* column bits in SOC physical address */
+#define UMC_V12_0_PA_C4_BIT 21
+#define UMC_V12_0_PA_C3_BIT 16
+#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C1_BIT 6
+#define UMC_V12_0_PA_C0_BIT 5
+
+/* channel index bits in SOC physical address */
+#define UMC_V12_0_PA_CH6_BIT 14
+#define UMC_V12_0_PA_CH5_BIT 13
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH3_BIT 11
+#define UMC_V12_0_PA_CH2_BIT 10
+#define UMC_V12_0_PA_CH1_BIT 9
+#define UMC_V12_0_PA_CH0_BIT 8
+
+/* Pseudochannel index bits in SOC physical address */
+#define UMC_V12_0_PA_PC0_BIT 7
+
+#define UMC_V12_0_NA_C2_BIT 8
+
+#define UMC_V12_0_SOC_PA_TO_SID(pa) \
+ ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL))
+
+#define UMC_V12_0_SOC_PA_TO_BANK(pa) \
+ ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL))
+
+#define UMC_V12_0_SOC_PA_TO_ROW(pa) \
+ ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \
+ (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \
+ (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \
+ (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \
+ (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \
+ (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \
+ (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \
+ (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL))
+
+#define UMC_V12_0_SOC_PA_TO_COL(pa) \
+ ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL))
+
+#define UMC_V12_0_SOC_PA_TO_CH(pa) \
+ ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL))
+
+#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL)
+
+#define UMC_V12_0_SOC_SID_TO_PA(sid) \
+ ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \
+ (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT))
+
+#define UMC_V12_0_SOC_BANK_TO_PA(bank) \
+ ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \
+ (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \
+ (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \
+ (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT))
+
+#define UMC_V12_0_SOC_ROW_TO_PA(row) \
+ ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \
+ (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \
+ (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \
+ (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \
+ (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \
+ (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \
+ (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \
+ (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \
+ (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \
+ (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \
+ (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \
+ (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \
+ (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \
+ (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT))
+
+#define UMC_V12_0_SOC_COL_TO_PA(col) \
+ ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \
+ (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \
+ (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \
+ (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \
+ (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT))
+
+#define UMC_V12_0_SOC_CH_TO_PA(ch) \
+ ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \
+ (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \
+ (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \
+ (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \
+ (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \
+ (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \
+ (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT))
+
+#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT)
+
+/* bank hash settings */
+#define UMC_V12_0_XOR_EN0 1
+#define UMC_V12_0_XOR_EN1 1
+#define UMC_V12_0_XOR_EN2 1
+#define UMC_V12_0_XOR_EN3 1
+#define UMC_V12_0_COL_XOR0 0x0
+#define UMC_V12_0_COL_XOR1 0x0
+#define UMC_V12_0_COL_XOR2 0x800
+#define UMC_V12_0_COL_XOR3 0x1000
+#define UMC_V12_0_ROW_XOR0 0x11111
+#define UMC_V12_0_ROW_XOR1 0x22222
+#define UMC_V12_0_ROW_XOR2 0x4444
+#define UMC_V12_0_ROW_XOR3 0x8888
+
+/* channel hash settings */
+#define UMC_V12_0_HASH_4K 0
+#define UMC_V12_0_HASH_64K 1
+#define UMC_V12_0_HASH_2M 1
+#define UMC_V12_0_HASH_1G 1
+#define UMC_V12_0_HASH_1T 1
+
+/* XOR some bits of the PA into the CH4~CH6 bits (bits 12~14 of the PA);
+ * a hash bit only takes effect when the related setting is enabled
+ */
+#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
+ (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
+ (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
+ (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
+ (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
+ } while (0)
+
+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * is the index of 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * is the index of 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768, the higher 26 bits in ErrorAddr
+ * is the index of 8KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
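
A quick worked example of the three-part address assembly used by both conversion paths above, with an arbitrary normalized address and channel index; the macros are restated locally so the sketch builds outside the kernel:

#include <stdint.h>
#include <stdio.h>

/* restated locally from the header above, for the sketch only */
#define ADDR_OF_32KB_BLOCK(addr)    (((addr) & ~0xffULL) << 7)
#define ADDR_OF_256B_BLOCK(ch)      ((uint64_t)(ch) << 8)
#define OFFSET_IN_256B_BLOCK(addr)  ((addr) & 0xffULL)

int main(void)
{
    uint64_t na = 0x1234;           /* normalized address, illustrative */
    uint32_t channel_index = 5;
    uint64_t soc_pa;

    /* 0x90000 | 0x500 | 0x34 = 0x90534 */
    soc_pa = ADDR_OF_32KB_BLOCK(na) |
             ADDR_OF_256B_BLOCK(channel_index) |
             OFFSET_IN_256B_BLOCK(na);

    printf("soc_pa = 0x%llx\n", (unsigned long long)soc_pa);
    return 0;
}
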
+
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
+#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3)
+#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \
+ (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF))
+#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+#define ACA_IPID_2_UMC_CH(ipid) \
+ (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_UMC_INST(ipid) \
+ (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
+#define ACA_ADDR_2_ERR_ADDR(addr) \
+ REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr)
+
+/* R13 bit shift should be considered, double the number */
+#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+
+/* C2, C3, C4, R13, four MCA bits are looped in page retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
+
+/* invalid node instance value */
+#define UMC_INV_AID_NODE 0xffff
+
+#define UMC_V12_0_AID_NUM_MAX 4
+#define UMC_V12_0_SOCKET_NUM_MAX 8
+
+#define UMC_V12_0_TOTAL_CHANNEL_NUM \
+ (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM)
+
+/* one device has 192GB HBM */
+#define SOCKET_LFB_SIZE 0x3000000000ULL
+
+extern const struct ras_umc_ip_func ras_umc_func_v12_0;
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+#endif
+
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 49803528023b..f9fdf19de74a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2552,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
+ /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
};
#undef OUI
@@ -2841,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+/*
+ * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and
+ * Appendix L.1 Derivation of Slice Count Requirements.
+ */
+static int dsc_sink_min_slice_throughput(int peak_pixel_rate)
+{
+ if (peak_pixel_rate >= 4800000)
+ return 600000;
+ else if (peak_pixel_rate >= 2700000)
+ return 400000;
+ else
+ return 340000;
+}
+
+/**
+ * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice
+ * @dsc_dpcd: DSC sink's capabilities from DPCD
+ * @peak_pixel_rate: Cumulative peak pixel rate in kHz
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the DSC sink device's maximum pixel throughput per slice, based on
+ * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred
+ * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Note that @peak_pixel_rate is the total pixel rate transferred to the same
+ * DSC/display sink. For instance, to calculate the slice count of one tile of
+ * an MST multi-tiled display sink (not considering here the required
+ * rounding/alignment of the slice count)::
+ *
+ * @peak_pixel_rate = tile_pixel_rate * tile_count
+ * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate)
+ * tile_slice_count = total_slice_count / tile_count
+ *
+ * Returns:
+ * The maximum pixel throughput per slice supported by the DSC sink device
+ * in kPixels/sec.
+ */
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444)
+{
+ int throughput;
+ int delta = 0;
+ int base;
+
+ throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+
+ if (is_rgb_yuv444) {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_SHIFT;
+
+ delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) &
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */
+ delta *= 2000;
+ } else {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_1_SHIFT;
+ }
+
+ switch (throughput) {
+ case 0:
+ return dsc_sink_min_slice_throughput(peak_pixel_rate);
+ case 1:
+ base = 340000;
+ break;
+ case 2 ... 14:
+ base = 400000 + 50000 * (throughput - 2);
+ break;
+ case 15:
+ base = 170000;
+ break;
+ }
+
+ return base + delta;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput);
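
As a worked example of the kernel-doc formula above (numbers are illustrative): assume the sink reports a peak-throughput field value of 2 with a zero delta, i.e. 400,000 kPixels/s per slice, and a 4-tile MST sink whose tiles each carry 800,000 kPixels/s. The sketch below restates the field-to-throughput mapping locally since it runs outside the kernel, and ignores the slice-count rounding/alignment the driver still has to apply:

#include <stdio.h>

/* DPCD peak-throughput field value (1..15) -> kPixels/s per slice */
static int throughput_field_to_kpixels(int field)
{
    if (field == 1)
        return 340000;
    if (field >= 2 && field <= 14)
        return 400000 + 50000 * (field - 2);
    return 170000;  /* field == 15 */
}

int main(void)
{
    int tile_count = 4;
    int tile_pixel_rate = 800000;                       /* kPixels/s per tile */
    int peak_pixel_rate = tile_pixel_rate * tile_count; /* 3,200,000 */
    int per_slice = throughput_field_to_kpixels(2);     /* 400,000 */
    int total_slices = peak_pixel_rate / per_slice;     /* 8 */

    printf("total slices: %d, per tile: %d\n",
           total_slices, total_slices / tile_count);    /* 8, 2 */
    return 0;
}
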
+
+static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg)
+{
+ return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+}
+
+/**
+ * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the branch device's maximum overall DSC pixel throughput, based on
+ * the device's DPCD DSC branch capabilities, and whether the output
+ * format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Returns:
+ * - 0: The maximum overall throughput capability is not indicated by
+ * the device separately and it must be determined from the per-slice
+ * max throughput (see @drm_dp_dsc_branch_slice_max_throughput())
+ * and the maximum slice count supported by the device.
+ * - > 0: The maximum overall DSC pixel throughput supported by the branch
+ * device in kPixels/sec.
+ */
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444)
+{
+ int throughput;
+
+ if (is_rgb_yuv444)
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0);
+ else
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_1);
+
+ switch (throughput) {
+ case 0:
+ return 0;
+ case 1:
+ return 680000;
+ default:
+ return 600000 + 50000 * throughput;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput);
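
The DPCD field decodes as: 0 means the cap is not reported separately, 1 means 680,000 kPixels/s, and any other value v means 600,000 + 50,000 * v (so 2 decodes to 700,000 and 10 to 1,100,000). A tiny standalone restatement of the mapping:

#include <stdio.h>

/* DPCD branch overall-throughput field value -> kPixels/s (0 = not reported) */
static int branch_overall_throughput_kpixels(int field)
{
    if (field == 0)
        return 0;
    if (field == 1)
        return 680000;
    return 600000 + 50000 * field;
}

int main(void)
{
    printf("%d %d %d\n",
           branch_overall_throughput_kpixels(1),    /* 680000 */
           branch_overall_throughput_kpixels(2),    /* 700000 */
           branch_overall_throughput_kpixels(10));  /* 1100000 */
    return 0;
}
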
+
+/**
+ * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ *
+ * Return the branch device's maximum overall DSC line width, based on
+ * the device's @dsc_branch_dpcd capabilities.
+ *
+ * Returns:
+ * - 0: The maximum line width is not indicated by the device
+ * separately and it must be determined from the maximum
+ * slice count and slice-width supported by the device.
+ * - %-EINVAL: The device indicates an invalid maximum line width
+ * (< 5120 pixels).
+ * - >= 5120: The maximum line width in pixels.
+ */
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE])
+{
+ int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH);
+
+ switch (line_width) {
+ case 0:
+ return 0;
+ case 1 ... 15:
+ return -EINVAL;
+ default:
+ return line_width * 320;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width);
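
Similarly, the max-line-width field is in units of 320 pixels, with 0 meaning not reported and 1..15 rejected as being below the 5120-pixel minimum; for example a field value of 16 decodes to 5120 pixels and 24 to 7680. A minimal restatement for illustration:

#include <stdio.h>

/* DPCD branch max-line-width field -> pixels (0 = not reported, 1..15 invalid) */
static int branch_max_line_width_pixels(int field)
{
    if (field == 0)
        return 0;
    if (field < 16)
        return -1;      /* below the 5120 pixel minimum */
    return field * 320;
}

int main(void)
{
    printf("%d %d\n",
           branch_max_line_width_pixels(16),    /* 5120 */
           branch_max_line_width_pixels(24));   /* 7680 */
    return 0;
}
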
+
static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address,
u8 *buf, int buf_size)
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 85dbdaa4a2e2..b2cb5ae5a139 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -419,6 +419,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (property == crtc->scaling_filter_property) {
state->scaling_filter = val;
+ } else if (property == crtc->sharpness_strength_property) {
+ state->sharpness_strength = val;
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
@@ -456,6 +458,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = 0;
else if (property == crtc->scaling_filter_property)
*val = state->scaling_filter;
+ else if (property == crtc->sharpness_strength_property)
+ *val = state->sharpness_strength;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 46655339003d..a7797d260f1e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
+ * SHARPNESS_STRENGTH:
+ * Atomic property for setting the sharpness strength/intensity by userspace.
+ *
+ * The value of this property is an integer ranging from 0 to 255, where:
+ *
+ * 0: Sharpness feature is disabled (default value).
+ *
+ * 1: Minimum sharpness.
+ *
+ * 255: Maximum sharpness.
+ *
+ * Userspace can gradually increase or decrease the sharpness level and
+ * pick the optimum value for the content being displayed.
+ * The value is passed to the kernel through this UAPI.
+ * Setting this property does not require a modeset.
+ * The sharpness effect is applied post blending, on the final composed output.
+ * If the feature is disabled the content is shown unmodified; when it is
+ * enabled, it enhances the clarity of the content.
*/
__printf(6, 0)
@@ -940,6 +959,22 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property *prop =
+ drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255);
+
+ if (!prop)
+ return -ENOMEM;
+
+ crtc->sharpness_strength_property = prop;
+ drm_object_attach_property(&crtc->base, prop, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property);
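
A hedged sketch of how a compositor might drive the new property from userspace through libdrm: look the CRTC property up by name and set a strength value. The device node, CRTC id and chosen strength are placeholders, error handling is omitted, and the property is only exposed by drivers that call drm_crtc_create_sharpness_strength_property():

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Find a CRTC property by name; returns its id or 0 if not exposed. */
static uint32_t find_crtc_prop(int fd, uint32_t crtc_id, const char *name)
{
    drmModeObjectProperties *props =
        drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
    uint32_t prop_id = 0;
    uint32_t i;

    if (!props)
        return 0;

    for (i = 0; i < props->count_props; i++) {
        drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

        if (prop && !strcmp(prop->name, name))
            prop_id = prop->prop_id;
        drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);
    return prop_id;
}

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);    /* placeholder device node */
    uint32_t crtc_id = 51;                      /* placeholder CRTC id */
    uint32_t prop_id = find_crtc_prop(fd, crtc_id, "SHARPNESS_STRENGTH");

    if (prop_id)
        /* 128 ~ mid-range sharpness, 0 would disable the feature */
        drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
                                 prop_id, 128);
    return 0;
}
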
+
/**
* drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
*
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e58c0c158b3a..84ec79b64960 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
+# FIXME: Disable tracepoints on i915 for PREEMPT_RT; unfortunately
+# it's an all-or-nothing flag. You cannot selectively disable
+# only some tracepoints.
+subdir-ccflags-$(CONFIG_PREEMPT_RT) += -DNOTRACE
+
subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -26,6 +31,7 @@ i915-y += \
i915_ioctl.o \
i915_irq.o \
i915_mitigations.o \
+ i915_mmio_range.o \
i915_module.o \
i915_params.o \
i915_pci.o \
@@ -228,6 +234,7 @@ i915-y += \
display/intel_bios.o \
display/intel_bo.o \
display/intel_bw.o \
+ display/intel_casf.o \
display/intel_cdclk.o \
display/intel_cmtg.o \
display/intel_color.o \
@@ -236,6 +243,7 @@ i915-y += \
display/intel_crtc.o \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
+ display/intel_dbuf_bw.o \
display/intel_display.o \
display/intel_display_conversion.o \
display/intel_display_driver.o \
@@ -248,6 +256,7 @@ i915-y += \
display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
+ display/intel_display_utils.o \
display/intel_display_wa.o \
display/intel_dmc.o \
display/intel_dmc_wl.o \
@@ -297,9 +306,11 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
+ display/skl_prefill.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o \
+ display/vlv_clock.o \
display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
@@ -346,6 +357,7 @@ i915-y += \
display/intel_gmbus.o \
display/intel_hdmi.o \
display/intel_lspcon.o \
+ display/intel_lt_phy.o \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pfit.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index aa159f9ce12f..a3ff21b2f69f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -11,7 +11,6 @@
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -20,6 +19,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 927fe56aec77..f444c5b7a27b 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -191,45 +191,46 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- /* IPS only exists on ULT machines and is tied to pipe A. */
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!display->params.enable_ips)
- return false;
-
if (crtc_state->pipe_bpp > 24)
return false;
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (display->platform.broadwell &&
- crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
- return false;
-
return true;
}
+static int _hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no IPS specific limits to worry about */
+ return 0;
+}
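
On Broadwell the pixel rate must stay within 95% of CDCLK for IPS, so the minimum CDCLK is DIV_ROUND_UP(pixel_rate * 100, 95); with an illustrative 450,000 kHz pixel rate that works out to 473,685 kHz, as the small standalone check below shows:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    int pixel_rate = 450000;    /* kHz, illustrative */

    /* pixel rate must not exceed 95% of CDCLK with IPS on BDW */
    printf("min cdclk: %d kHz\n", DIV_ROUND_UP(pixel_rate * 100, 95));
    return 0;
}
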
+
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ int min_cdclk;
- if (!display->platform.broadwell)
+ if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
- if (!hsw_crtc_state_ips_capable(crtc_state))
+ min_cdclk = _hsw_ips_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency;
+ * if that is not enough, IPS will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+ return min_cdclk;
}
int hsw_ips_compute_config(struct intel_atomic_state *state,
@@ -244,6 +245,12 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
+ if (_hsw_ips_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ if (!display->params.enable_ips)
+ return 0;
+
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
@@ -257,18 +264,6 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
return 0;
- if (display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
- return 0;
- }
-
crtc_state->ips_enabled = true;
return 0;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 407deb5dfb57..6e39d7f2e0c2 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
@@ -19,6 +18,7 @@
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 4e0dc2d8a642..01f3803fa09f 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -2297,12 +2297,11 @@ static void i9xx_update_wm(struct intel_display *display)
crtc = single_enabled_crtc(display);
if (display->platform.i915gm && crtc) {
- struct drm_gem_object *obj;
-
- obj = intel_fb_bo(crtc->base.primary->state->fb);
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
/* self-refresh seems busted with untiled */
- if (!intel_bo_is_tiled(obj))
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
crtc = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 37faa8f19f6e..70d4c1bc70fc 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -35,7 +35,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -48,6 +47,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -1655,7 +1655,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- crtc_state->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(crtc_state);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 1addd6288241..68c01932f7b4 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -11,10 +11,10 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index ed7a7ed486b5..6372f533f65b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -49,7 +49,7 @@ void intel_alpm_init(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
- mutex_init(&intel_dp->alpm_parameters.lock);
+ mutex_init(&intel_dp->alpm.lock);
}
static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
@@ -58,43 +58,32 @@ static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
1000 / 1000;
}
-static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
- int *min, int *max)
+static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
{
if (crtc_state->port_clock < 540000) {
*min = 65 * LFPS_CYCLE_COUNT;
*max = 75 * LFPS_CYCLE_COUNT;
- } else if (crtc_state->port_clock <= 810000) {
+ } else {
*min = 140;
*max = 800;
- } else {
- *min = *max = -1;
- return -1;
}
-
- return 0;
}
static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
{
- int tlfps_cycle_min, tlfps_cycle_max, ret;
+ int tlfps_cycle_min, tlfps_cycle_max;
- ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
- &tlfps_cycle_max);
- if (ret)
- return ret;
+ get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
}
static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
{
- int lfps_cycle_time = get_lfps_cycle_time(crtc_state);
-
- if (lfps_cycle_time < 0)
- return -1;
-
- return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT);
+ return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 /
+ 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -133,7 +122,7 @@ static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_s
static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
@@ -146,8 +135,6 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
silence_period = get_silence_period_symbols(crtc_state);
lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
- if (lfps_half_cycle < 0)
- return false;
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
@@ -157,15 +144,15 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
- intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
- intel_dp->alpm_parameters.silence_period_sym_clocks = silence_period;
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+ crtc_state->alpm_state.aux_less_wake_lines = aux_less_wake_lines;
+ crtc_state->alpm_state.silence_period_sym_clocks = silence_period;
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms = lfps_half_cycle;
return true;
}
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
@@ -186,7 +173,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
check_entry_lines = 15;
- intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
+ crtc_state->alpm_state.check_entry_lines = check_entry_lines;
return true;
}
@@ -217,7 +204,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
}
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
@@ -255,8 +242,8 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
+ crtc_state->alpm_state.io_wake_lines = max(io_wake_lines, 7);
+ crtc_state->alpm_state.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -270,12 +257,12 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
- if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ if (intel_dp->alpm.lobf_disable_debug) {
drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
return;
}
- if (intel_dp->alpm_parameters.sink_alpm_error)
+ if (intel_dp->alpm.sink_alpm_error)
return;
if (!intel_dp_is_edp(intel_dp))
@@ -306,9 +293,9 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_vdisplay - context_latency;
first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = intel_dp->alpm_parameters.io_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
else
- waketime_in_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
crtc_state->has_lobf = (context_latency + guardband) >
(first_sdp_position + waketime_in_lines);
@@ -325,7 +312,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
!crtc_state->has_lobf))
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
@@ -334,7 +321,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
- ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
+ ALPM_CTL_AUX_LESS_WAKE_TIME(crtc_state->alpm_state.aux_less_wake_lines);
if (intel_dp->as_sdp_supported) {
u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
@@ -352,7 +339,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(crtc_state->alpm_state.fast_wake_lines);
}
if (crtc_state->has_lobf) {
@@ -360,17 +347,17 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
}
- alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(crtc_state->alpm_state.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
- intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->alpm.transcoder = crtc_state->cpu_transcoder;
}
void intel_alpm_port_configure(struct intel_dp *intel_dp,
@@ -388,14 +375,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks);
+ crtc_state->alpm_state.silence_period_sym_clocks);
lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms);
}
intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
@@ -433,10 +420,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
continue;
if (old_crtc_state->has_lobf) {
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
}
}
@@ -530,7 +517,7 @@ i915_edp_lobf_debug_get(void *data, u64 *val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- *val = intel_dp->alpm_parameters.lobf_disable_debug;
+ *val = intel_dp->alpm.lobf_disable_debug;
return 0;
}
@@ -541,7 +528,7 @@ i915_edp_lobf_debug_set(void *data, u64 val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- intel_dp->alpm_parameters.lobf_disable_debug = val;
+ intel_dp->alpm.lobf_disable_debug = val;
return 0;
}
@@ -569,12 +556,12 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
void intel_alpm_disable(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->alpm.transcoder;
if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
@@ -585,7 +572,7 @@ void intel_alpm_disable(struct intel_dp *intel_dp)
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
drm_dbg_kms(display->drm, "Disabling ALPM\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
bool intel_alpm_get_error(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index a861c20b5d79..53599b464dea 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -17,7 +17,7 @@ struct intel_crtc;
void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3b14f929825a..a68fdbd2acb9 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -13,7 +13,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
@@ -21,6 +20,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 9e0647327710..852e4d6db8a3 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,11 +37,11 @@
#include "soc/intel_rom.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index b3e4cc9985e9..f97ccc1a96a7 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -10,26 +10,19 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
-#include "intel_atomic.h"
#include "intel_bw.h"
-#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
struct intel_bw_state {
struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask, used to determine, whether correspondent
@@ -837,49 +830,6 @@ void intel_bw_init_hw(struct intel_display *display)
icl_get_bw_info(display, dram_info, &icl_sa_info);
}
-static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
-{
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
-}
-
-static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- unsigned int data_rate = 0;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->data_rate[plane_id];
-
- if (DISPLAY_VER(display) < 11)
- data_rate += crtc_state->data_rate_y[plane_id];
- }
-
- return data_rate;
-}
-
-/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(struct intel_display *display,
- unsigned int data_rate)
-{
- if (DISPLAY_VER(display) < 12)
- return 0;
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
-}
-
static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
@@ -895,14 +845,13 @@ static unsigned int intel_bw_num_active_planes(struct intel_display *display,
static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
+ if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -1263,223 +1212,6 @@ static int intel_bw_check_qgv_points(struct intel_display *display,
old_bw_state, new_bw_state);
}
-static bool intel_dbuf_bw_changed(struct intel_display *display,
- const struct intel_dbuf_bw *old_dbuf_bw,
- const struct intel_dbuf_bw *new_dbuf_bw)
-{
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
- old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
- return true;
- }
-
- return false;
-}
-
-static bool intel_bw_state_changed(struct intel_display *display,
- const struct intel_bw_state *old_bw_state,
- const struct intel_bw_state *new_bw_state)
-{
- enum pipe pipe;
-
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *old_dbuf_bw =
- &old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_dbuf_bw =
- &new_bw_state->dbuf_bw[pipe];
-
- if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
- return true;
-
- if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
- intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
- return true;
- }
-
- return false;
-}
-
-static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- struct intel_crtc *crtc,
- enum plane_id plane_id,
- const struct skl_ddb_entry *ddb,
- unsigned int data_rate)
-{
- struct intel_display *display = to_intel_display(crtc);
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
- enum dbuf_slice slice;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
- dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
- dbuf_bw->active_planes[slice] |= BIT(plane_id);
- }
-}
-
-static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- memset(dbuf_bw, 0, sizeof(*dbuf_bw));
-
- if (!crtc_state->hw.active)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb[plane_id],
- crtc_state->data_rate[plane_id]);
-
- if (DISPLAY_VER(display) < 11)
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb_y[plane_id],
- crtc_state->data_rate[plane_id]);
- }
-}
-
-/* "Maximum Data Buffer Bandwidth" */
-static int
-intel_bw_dbuf_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- unsigned int total_max_bw = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- int num_active_planes = 0;
- unsigned int max_bw = 0;
- enum pipe pipe;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
-
- max_bw = max(dbuf_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
- }
- max_bw *= num_active_planes;
-
- total_max_bw = max(total_max_bw, max_bw);
- }
-
- return DIV_ROUND_UP(total_max_bw, 64);
-}
-
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- enum pipe pipe;
- int min_cdclk;
-
- min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
-
- for_each_pipe(display, pipe)
- min_cdclk = max(min_cdclk,
- intel_bw_crtc_min_cdclk(display,
- bw_state->data_rate[pipe]));
-
- return min_cdclk;
-}
-
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct intel_display *display = to_intel_display(state);
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- int old_min_cdclk, new_min_cdclk;
- struct intel_crtc *crtc;
- int i;
-
- if (DISPLAY_VER(display) < 9)
- return 0;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
-
- skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
- skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
-
- if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
- continue;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
- }
-
- if (!old_bw_state)
- return 0;
-
- if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= old_min_cdclk)
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
- return 0;
-
- drm_dbg_kms(display->drm,
- "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
- *need_cdclk_calc = true;
-
- return 0;
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct intel_display *display = to_intel_display(state);
@@ -1490,13 +1222,13 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
unsigned int old_data_rate =
- intel_bw_crtc_data_rate(old_crtc_state);
+ intel_crtc_bw_data_rate(old_crtc_state);
unsigned int new_data_rate =
- intel_bw_crtc_data_rate(new_crtc_state);
+ intel_crtc_bw_data_rate(new_crtc_state);
unsigned int old_active_planes =
- intel_bw_crtc_num_active_planes(old_crtc_state);
+ intel_crtc_bw_num_active_planes(old_crtc_state);
unsigned int new_active_planes =
- intel_bw_crtc_num_active_planes(new_crtc_state);
+ intel_crtc_bw_num_active_planes(new_crtc_state);
struct intel_bw_state *new_bw_state;
/*
@@ -1528,11 +1260,11 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state;
struct intel_bw_state *new_bw_state;
+ int ret;
- if (DISPLAY_VER(display) < 9)
+ if (!intel_any_crtc_active_changed(state))
return 0;
new_bw_state = intel_atomic_get_bw_state(state);
@@ -1544,13 +1276,9 @@ static int intel_bw_modeset_checks(struct intel_atomic_state *state)
new_bw_state->active_pipes =
intel_calc_active_pipes(state, old_bw_state->active_pipes);
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- int ret;
-
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
return 0;
}
@@ -1600,7 +1328,7 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
bool changed = false;
@@ -1611,11 +1339,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
if (DISPLAY_VER(display) < 9)
return 0;
- if (any_ms) {
- ret = intel_bw_modeset_checks(state);
- if (ret)
- return ret;
- }
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
ret = intel_bw_check_sagv_mask(state);
if (ret)
@@ -1658,9 +1384,9 @@ static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
+ intel_crtc_bw_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
+ intel_crtc_bw_num_active_planes(crtc_state);
drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -1691,8 +1417,6 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
- skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
-
/* initially SAGV has been forced off */
bw_state->pipe_sagv_reject |= BIT(pipe);
}
@@ -1710,7 +1434,6 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index d51f50c9d302..99b447388245 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -28,11 +28,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct intel_display *display);
int intel_bw_init(struct intel_display *display);
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
new file mode 100644
index 000000000000..95339b496f24
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+
+#define MAX_PIXELS_FOR_3_TAP_FILTER (1920 * 1080)
+#define MAX_PIXELS_FOR_5_TAP_FILTER (3840 * 2160)
+
+#define FILTER_COEFF_0_125 125
+#define FILTER_COEFF_0_25 250
+#define FILTER_COEFF_0_5 500
+#define FILTER_COEFF_1_0 1000
+#define FILTER_COEFF_0_0 0
+#define SET_POSITIVE_SIGN(x) ((x) & (~SIGN))
+
+/**
+ * DOC: Content Adaptive Sharpness Filter (CASF)
+ *
+ * Starting from LNL the display engine supports an
+ * adaptive sharpening filter that enhances image
+ * quality. The display hardware uses the second
+ * pipe scaler to implement CASF, so pipe scaling
+ * cannot be used while sharpness is enabled.
+ * The filter operates on a region of pixels whose
+ * size is determined by the tap size. The coefficients
+ * are used to generate an alpha value which blends the
+ * sharpened image with the original image.
+ */
+
+/* Default LUT values to be loaded one time. */
+static const u16 sharpness_lut[] = {
+ 4095, 2047, 1364, 1022, 816, 678, 579,
+ 504, 444, 397, 357, 323, 293, 268, 244, 224,
+ 204, 187, 170, 154, 139, 125, 111, 98, 85,
+ 73, 60, 48, 36, 24, 12, 0
+};
+
+static const u16 filtercoeff_1[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_0, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_0,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_2[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_3[] = {
+ FILTER_COEFF_0_125, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_125,
+};
+
+static void intel_casf_filter_lut_load(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int i;
+
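+ /*
+  * Start at LUT index 0 with auto-increment enabled, so each
+  * SHRPLUT_DATA write below programs the next LUT entry.
+  */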
+ intel_de_write(display, SHRPLUT_INDEX(crtc->pipe),
+ INDEX_AUTO_INCR | INDEX_VALUE(0));
+
+ for (i = 0; i < ARRAY_SIZE(sharpness_lut); i++)
+ intel_de_write(display, SHRPLUT_DATA(crtc->pipe),
+ sharpness_lut[i]);
+}
+
+void intel_casf_update_strength(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int win_size;
+
+ intel_de_rmw(display, SHARPNESS_CTL(crtc->pipe), FILTER_STRENGTH_MASK,
+ FILTER_STRENGTH(crtc_state->hw.casf_params.strength));
+
+ win_size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, 1));
+
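+ /*
+  * Writing back the (unchanged) scaler window size presumably re-arms
+  * the double-buffered scaler registers so the new strength is latched.
+  */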
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, 1), win_size);
+}
+
+static void intel_casf_compute_win_size(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->hw.adjusted_mode;
+ u32 total_pixels = mode->hdisplay * mode->vdisplay;
+
+ if (total_pixels <= MAX_PIXELS_FOR_3_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_3X3;
+ else if (total_pixels <= MAX_PIXELS_FOR_5_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_5X5;
+ else
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_7X7;
+}
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!HAS_CASF(display))
+ return 0;
+
+ if (crtc_state->uapi.sharpness_strength == 0) {
+ crtc_state->hw.casf_params.casf_enable = false;
+ crtc_state->hw.casf_params.strength = 0;
+ return 0;
+ }
+
+ crtc_state->hw.casf_params.casf_enable = true;
+
+ /*
+ * HW takes a value in the form (1.0 + strength) in 4.4 fixed point format.
+ * Strength ranges from 0.0-14.9375, i.e. from 0-239.
+ * The user can give a value from 0-255, but it is clamped to 239.
+ * E.g. the user gives 85, which is 5.3125; adding 1.0 gives 6.3125.
+ * 6.3125 in 4.4 format is b01100101, which equals 101.
+ * Equivalently, 85 + 16 = 101.
+ */
+ crtc_state->hw.casf_params.strength =
+ min(crtc_state->uapi.sharpness_strength, 0xEF) + 0x10;
+
+ intel_casf_compute_win_size(crtc_state);
+
+ intel_casf_scaler_compute_config(crtc_state);
+
+ return 0;
+}
+
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharp;
+
+ sharp = intel_de_read(display, SHARPNESS_CTL(crtc->pipe));
+ if (sharp & FILTER_EN) {
+ if (drm_WARN_ON(display->drm,
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp) < 16))
+ crtc_state->hw.casf_params.strength = 0;
+ else
+ crtc_state->hw.casf_params.strength =
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp);
+ crtc_state->hw.casf_params.casf_enable = true;
+ crtc_state->hw.casf_params.win_size =
+ REG_FIELD_GET(FILTER_SIZE_MASK, sharp);
+ }
+}
+
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.casf_params.casf_enable)
+ return true;
+
+ return false;
+}
+
+static int casf_coeff_tap(int i)
+{
+ return i % SCALER_FILTER_NUM_TAPS;
+}
+
+static u32 casf_coeff(struct intel_crtc_state *crtc_state, int t)
+{
+ struct scaler_filter_coeff value;
+ u32 coeff;
+
+ value = crtc_state->hw.casf_params.coeff[t];
+ value.sign = 0;
+
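+ /*
+  * Pack into the HW coefficient layout (sign << 15, exponent << 12,
+  * mantissa << 3); the sign is cleared since the taps are programmed
+  * as positive values here.
+  */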
+ coeff = value.sign << 15 | value.exp << 12 | value.mantissa << 3;
+ return coeff;
+}
+
+/*
+ * 17 phases of 7 taps require 119 coefficients, packed into 60 dwords per set.
+ * To enable CASF, program the scaler coefficients with the coefficients
+ * that are calculated and stored in hw.casf_params.coeff as per
+ * SCALER_COEFFICIENT_FORMAT.
+ */
+static void intel_casf_write_coeff(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int id = crtc_state->scaler_state.scaler_id;
+ int i;
+
+ if (id != 1) {
+ drm_WARN(display->drm, 1, "Second scaler not enabled\n");
+ return;
+ }
+
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(crtc->pipe, id, 0),
+ PS_COEF_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * SCALER_FILTER_NUM_TAPS; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = casf_coeff_tap(i);
+ tmp = casf_coeff(crtc_state, t);
+
+ t = casf_coeff_tap(i + 1);
+ tmp |= casf_coeff(crtc_state, t) << 16;
+
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(crtc->pipe, id, 0),
+ tmp);
+ }
+}
+
+static void convert_sharpness_coef_binary(struct scaler_filter_coeff *coeff,
+ u16 coefficient)
+{
+ if (coefficient < 25) {
+ coeff->mantissa = (coefficient * 2048) / 100;
+ coeff->exp = 3;
+ } else if (coefficient < 50) {
+ coeff->mantissa = (coefficient * 1024) / 100;
+ coeff->exp = 2;
+ } else if (coefficient < 100) {
+ coeff->mantissa = (coefficient * 512) / 100;
+ coeff->exp = 1;
+ } else {
+ coeff->mantissa = (coefficient * 256) / 100;
+ coeff->exp = 0;
+ }
+}
+
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state)
+{
+ const u16 *filtercoeff;
+ u16 filter_coeff[SCALER_FILTER_NUM_TAPS];
+ u16 sumcoeff = 0;
+ int i;
+
+ if (crtc_state->hw.casf_params.win_size == SHARPNESS_FILTER_SIZE_3X3)
+ filtercoeff = filtercoeff_1;
+ else if (crtc_state->hw.casf_params.win_size == SHARPNESS_FILTER_SIZE_5X5)
+ filtercoeff = filtercoeff_2;
+ else
+ filtercoeff = filtercoeff_3;
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++)
+ sumcoeff += *(filtercoeff + i);
+
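+ /*
+  * Normalize each tap to a percentage of the total weight before
+  * converting it to the mantissa/exponent form expected by the HW.
+  */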
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) {
+ filter_coeff[i] = (*(filtercoeff + i) * 100 / sumcoeff);
+ convert_sharpness_coef_binary(&crtc_state->hw.casf_params.coeff[i],
+ filter_coeff[i]);
+ }
+}
+
+void intel_casf_enable(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharpness_ctl;
+
+ intel_casf_filter_lut_load(crtc, crtc_state);
+
+ intel_casf_write_coeff(crtc_state);
+
+ sharpness_ctl = FILTER_EN | FILTER_STRENGTH(crtc_state->hw.casf_params.strength);
+
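+ /* win_size is already stored as a FILTER_SIZE_MASK register field value. */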
+ sharpness_ctl |= crtc_state->hw.casf_params.win_size;
+
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), sharpness_ctl);
+
+ skl_scaler_setup_casf(crtc_state);
+}
+
+void intel_casf_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_de_write(display, SKL_PS_CTRL(crtc->pipe, 1), 0);
+ intel_de_write(display, SKL_PS_WIN_POS(crtc->pipe, 1), 0);
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), 0);
+ intel_de_write(display, SKL_PS_WIN_SZ(crtc->pipe, 1), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_casf.h b/drivers/gpu/drm/i915/display/intel_casf.h
new file mode 100644
index 000000000000..b3fb0bcb3f5b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_H__
+#define __INTEL_CASF_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state);
+void intel_casf_update_strength(struct intel_crtc_state *new_crtc_state);
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state);
+void intel_casf_enable(struct intel_crtc_state *crtc_state);
+void intel_casf_disable(const struct intel_crtc_state *crtc_state);
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state);
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_CASF_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_casf_regs.h b/drivers/gpu/drm/i915/display/intel_casf_regs.h
new file mode 100644
index 000000000000..87803cca510f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_REGS_H__
+#define __INTEL_CASF_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _SHARPNESS_CTL_A 0x682B0
+#define _SHARPNESS_CTL_B 0x68AB0
+#define SHARPNESS_CTL(pipe) _MMIO_PIPE(pipe, _SHARPNESS_CTL_A, _SHARPNESS_CTL_B)
+#define FILTER_EN REG_BIT(31)
+#define FILTER_STRENGTH_MASK REG_GENMASK(15, 8)
+#define FILTER_STRENGTH(x) REG_FIELD_PREP(FILTER_STRENGTH_MASK, (x))
+#define FILTER_SIZE_MASK REG_GENMASK(1, 0)
+#define SHARPNESS_FILTER_SIZE_3X3 REG_FIELD_PREP(FILTER_SIZE_MASK, 0)
+#define SHARPNESS_FILTER_SIZE_5X5 REG_FIELD_PREP(FILTER_SIZE_MASK, 1)
+#define SHARPNESS_FILTER_SIZE_7X7 REG_FIELD_PREP(FILTER_SIZE_MASK, 2)
+
+#define _SHRPLUT_DATA_A 0x682B8
+#define _SHRPLUT_DATA_B 0x68AB8
+#define SHRPLUT_DATA(pipe) _MMIO_PIPE(pipe, _SHRPLUT_DATA_A, _SHRPLUT_DATA_B)
+
+#define _SHRPLUT_INDEX_A 0x682B4
+#define _SHRPLUT_INDEX_B 0x68AB4
+#define SHRPLUT_INDEX(pipe) _MMIO_PIPE(pipe, _SHRPLUT_INDEX_A, _SHRPLUT_INDEX_B)
+#define INDEX_AUTO_INCR REG_BIT(10)
+#define INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define INDEX_VALUE(x) REG_FIELD_PREP(INDEX_VALUE_MASK, (x))
+
+#endif /* __INTEL_CASF_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 261c7d7aa311..bdb42fcc4cb2 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -33,15 +33,15 @@
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
-#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
@@ -50,6 +50,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_clock.h"
#include "vlv_dsi.h"
#include "vlv_sideband.h"
@@ -133,8 +134,8 @@ struct intel_cdclk_state {
*/
struct intel_cdclk_config actual;
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
+ /* minimum acceptable cdclk to satisfy DBUF bandwidth requirements */
+ int dbuf_bw_min_cdclk;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -146,6 +147,9 @@ struct intel_cdclk_state {
/* forced minimum cdclk for glk+ audio w/a */
int force_min_cdclk;
+ /* bitmask of enabled pipes */
+ u8 enabled_pipes;
+
/* bitmask of active pipes */
u8 active_pipes;
@@ -564,8 +568,7 @@ static void hsw_get_cdclk(struct intel_display *display,
static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ?
333333 : 320000;
/*
@@ -585,8 +588,6 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.valleyview) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
@@ -600,7 +601,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1;
}
}
@@ -609,17 +610,12 @@ static void vlv_get_cdclk(struct intel_display *display,
{
u32 val;
- vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
- cdclk_config->vco = vlv_get_hpll_vco(display->drm);
- cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_config->vco);
+ cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm);
+ vlv_punit_get(display->drm);
val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
-
- vlv_iosf_sb_put(display->drm,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_punit_put(display->drm);
if (display->platform.valleyview)
cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -631,7 +627,6 @@ static void vlv_get_cdclk(struct intel_display *display,
static void vlv_program_pfi_credits(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
if (display->platform.cherryview)
@@ -639,7 +634,7 @@ static void vlv_program_pfi_credits(struct intel_display *display)
else
default_credits = PFI_CREDIT(8);
- if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) {
/* CHV suggested value is 31 or 63 */
if (display->platform.cherryview)
credits = PFI_CREDIT_63;
@@ -671,7 +666,6 @@ static void vlv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -716,7 +710,7 @@ static void vlv_set_cdclk(struct intel_display *display,
if (cdclk == 400000) {
u32 divider;
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1,
cdclk) - 1;
/* adjust cdclk divider */
@@ -1568,7 +1562,7 @@ static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk)
drm_WARN(display->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
min_cdclk, display->cdclk.hw.ref);
- return 0;
+ return display->cdclk.max_cdclk_freq;
}
static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk)
@@ -2601,6 +2595,12 @@ static void intel_set_cdclk(struct intel_display *display,
}
}
+static int dg2_power_well_count(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
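+ /*
+  * Only DG2 notifies PCode about the pipe power well count; other
+  * platforms return 0 so the count comparisons become no-ops.
+  */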
+ return display->platform.dg2 ? hweight8(cdclk_state->active_pipes) : 0;
+}
+
static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -2613,16 +2613,16 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual) &&
- new_cdclk_state->active_pipes ==
- old_cdclk_state->active_pipes)
+ dg2_power_well_count(display, old_cdclk_state) ==
+ dg2_power_well_count(display, new_cdclk_state))
return;
/* According to "Sequence Before Frequency Change", voltage level set to 0x3 */
voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX;
change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) >
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) >
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence Before Frequency Change",
@@ -2640,7 +2640,7 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
* no action if it is decreasing, before the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
change_cdclk, update_pipe_count);
@@ -2660,8 +2660,8 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
voltage_level = new_cdclk_state->actual.voltage_level;
update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) <
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) <
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence After Frequency Change",
@@ -2677,7 +2677,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
* no action if it is increasing, after the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
update_cdclk, update_pipe_count);
@@ -2712,6 +2712,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2764,6 +2767,9 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2801,16 +2807,20 @@ static int intel_cdclk_guardband(struct intel_display *display)
return 90;
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+static int _intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state, int pixel_rate)
{
struct intel_display *display = to_intel_display(crtc_state);
int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
int guardband = intel_cdclk_guardband(display);
- int pixel_rate = crtc_state->pixel_rate;
return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ return _intel_pixel_rate_to_cdclk(crtc_state, crtc_state->pixel_rate);
+}
+
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2819,12 +2829,12 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int min_cdclk = 0;
for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
+ min_cdclk = max(min_cdclk, crtc_state->plane_min_cdclk[plane->id]);
return min_cdclk;
}
-static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -2832,6 +2842,8 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
+ min_cdclk = max(min_cdclk, intel_crtc_bw_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_fbc_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
@@ -2841,51 +2853,110 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_cdclk_update_crtc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
{
struct intel_display *display = to_intel_display(state);
- struct intel_cdclk_state *cdclk_state =
- intel_atomic_get_new_cdclk_state(state);
- const struct intel_bw_state *bw_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- int min_cdclk, i;
- enum pipe pipe;
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (min_cdclk < 0)
- return min_cdclk;
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
- continue;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ old_min_cdclk = cdclk_state->min_cdclk[crtc->pipe];
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- bw_state = intel_atomic_get_new_bw_state(state);
- if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->bw_min_cdclk != min_cdclk) {
- int ret;
+ cdclk_state->min_cdclk[crtc->pipe] = new_min_cdclk;
- cdclk_state->bw_min_cdclk = min_cdclk;
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
- }
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] min cdclk: %d kHz -> %d kHz\n",
+ crtc->base.base.id, crtc->base.name,
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
- min_cdclk = max(cdclk_state->force_min_cdclk,
- cdclk_state->bw_min_cdclk);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ old_min_cdclk = cdclk_state->dbuf_bw_min_cdclk;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state->dbuf_bw_min_cdclk = new_min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "dbuf bandwidth min cdclk: %d kHz -> %d kHz\n",
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
+
+static bool glk_cdclk_audio_wa_needed(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.geminilake &&
+ cdclk_state->enabled_pipes &&
+ !is_power_of_2(cdclk_state->enabled_pipes);
+}
+
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe;
+ int min_cdclk;
+
+ min_cdclk = cdclk_state->force_min_cdclk;
+ min_cdclk = max(min_cdclk, cdclk_state->dbuf_bw_min_cdclk);
for_each_pipe(display, pipe)
min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
@@ -2897,8 +2968,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
- if (display->platform.geminilake && cdclk_state->active_pipes &&
- !is_power_of_2(cdclk_state->active_pipes))
+ if (glk_cdclk_audio_wa_needed(display, cdclk_state))
min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
@@ -3184,41 +3254,69 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
return to_intel_cdclk_state(cdclk_state);
}
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_cdclk_modeset_checks(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
+ struct intel_cdclk_state *new_cdclk_state;
int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially affected
- * planes are part of the state. We can now compute the minimum cdclk
- * for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
+ if (!intel_any_crtc_enable_changed(state) &&
+ !intel_any_crtc_active_changed(state))
+ return 0;
- ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->enabled_pipes =
+ intel_calc_enabled_pipes(state, old_cdclk_state->enabled_pipes);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!old_cdclk_state->active_pipes != !new_cdclk_state->active_pipes)
+ *need_cdclk_calc = true;
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ if (glk_cdclk_audio_wa_needed(display, old_cdclk_state) !=
+ glk_cdclk_audio_wa_needed(display, new_cdclk_state))
+ *need_cdclk_calc = true;
+
+ if (dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state))
*need_cdclk_calc = true;
return 0;
}
+static int intel_crtcs_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = intel_cdclk_update_crtc_min_cdclk(state, crtc,
+ old_crtc_state->min_cdclk,
+ new_crtc_state->min_cdclk,
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
{
struct intel_cdclk_state *cdclk_state;
@@ -3251,18 +3349,17 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
- bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
- hweight8(new_cdclk_state->active_pipes);
- bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
- &new_cdclk_state->actual);
/*
- * We need to poke hw for gen >= 12, because we notify PCode if
+ * We need to poke hw for DG2, because we notify PCode if
* pipe power well count changes.
*/
- return cdclk_changed || (display->platform.dg2 && power_well_cnt_changed);
+ return intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual) ||
+ dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state);
}
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
@@ -3276,9 +3373,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state->active_pipes =
- intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
-
ret = intel_cdclk_modeset_calc_cdclk(state);
if (ret)
return ret;
@@ -3291,9 +3385,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
- intel_cdclk_changed(&old_cdclk_state->logical,
+ } else if (intel_cdclk_changed(&old_cdclk_state->logical,
&new_cdclk_state->logical)) {
ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
@@ -3375,14 +3467,55 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+int intel_cdclk_atomic_check(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
+ bool need_cdclk_calc = false;
+ int ret;
+
+ ret = intel_cdclk_modeset_checks(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_crtcs_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_dbuf_bw_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+
+ need_cdclk_calc = true;
+ }
+
+ if (need_cdclk_calc) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void intel_cdclk_update_hw_state(struct intel_display *display)
{
- const struct intel_bw_state *bw_state =
- to_intel_bw_state(display->bw.obj.state);
+ const struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
+ cdclk_state->enabled_pipes = 0;
cdclk_state->active_pipes = 0;
for_each_intel_crtc(display->drm, crtc) {
@@ -3390,14 +3523,16 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
+ if (crtc_state->hw.enable)
+ cdclk_state->enabled_pipes |= BIT(pipe);
if (crtc_state->hw.active)
cdclk_state->active_pipes |= BIT(pipe);
- cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
+ cdclk_state->min_cdclk[pipe] = crtc_state->min_cdclk;
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
- cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ cdclk_state->dbuf_bw_min_cdclk = intel_dbuf_bw_min_cdclk(display, dbuf_bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3566,13 +3701,6 @@ static int pch_rawclk(struct intel_display *display)
return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-static int vlv_hrawclk(struct intel_display *display)
-{
- /* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
- CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
static int i9xx_hrawclk(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
@@ -3606,7 +3734,7 @@ u32 intel_read_rawclk(struct intel_display *display)
else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
- freq = vlv_hrawclk(display);
+ freq = vlv_clock_get_hrawclk(display->drm);
else if (DISPLAY_VER(display) >= 3)
freq = i9xx_hrawclk(display);
else
@@ -3898,11 +4026,6 @@ int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe
return cdclk_state->min_cdclk[pipe];
}
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
-{
- return cdclk_state->bw_min_cdclk;
-}
-
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
{
const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
@@ -3934,3 +4057,75 @@ void intel_cdclk_read_hw(struct intel_display *display)
cdclk_state->actual = display->cdclk.hw;
cdclk_state->logical = display->cdclk.hw;
}
+
+static int calc_cdclk(const struct intel_crtc_state *crtc_state, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton) {
+ return bxt_calc_cdclk(display, min_cdclk);
+ } else if (DISPLAY_VER(display) == 9) {
+ int vco;
+
+ vco = display->cdclk.skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+
+ return skl_calc_cdclk(min_cdclk, vco);
+ } else if (display->platform.broadwell) {
+ return bdw_calc_cdclk(min_cdclk);
+ } else if (display->platform.cherryview || display->platform.valleyview) {
+ return vlv_calc_cdclk(display, min_cdclk);
+ } else {
+ return display->cdclk.max_cdclk_freq;
+ }
+}
+
+static unsigned int _intel_cdclk_prefill_adj(const struct intel_crtc_state *crtc_state,
+ int clock, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int cdclk = calc_cdclk(crtc_state, min_cdclk);
+
+ return min(0x10000, DIV_ROUND_UP_ULL((u64)clock << 16, ppc * cdclk));
+}
+
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /* FIXME use the actual min_cdclk for the pipe here */
+ return intel_cdclk_prefill_adjustment_worst(crtc_state);
+}
+
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ int clock = crtc_state->hw.pipe_mode.crtc_clock;
+ int min_cdclk;
+
+ /*
+ * FIXME could perhaps consider a few more of the factors
+ * that go into the per-crtc min_cdclk. Namely anything that
+ * only changes during full modesets.
+ *
+ * FIXME this assumes 1:1 scaling, but the other _worst() stuff
+ * assumes max downscaling, so the final result will be
+ * unrealistically bad. Figure out where the actual maximum value
+ * lies and use that to compute a more realistic worst case
+ * estimate...
+ */
+ min_cdclk = _intel_pixel_rate_to_cdclk(crtc_state, clock);
+
+ return _intel_cdclk_prefill_adj(crtc_state, clock, min_cdclk);
+}
+
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, prefill_lines_unadjusted),
+ ppc * prefill_lines_available);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cacee598af0e..1ff7d078b42c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -38,16 +38,17 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context);
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state);
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
void intel_cdclk_update_hw_state(struct intel_display *display);
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
@@ -64,9 +65,16 @@ int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
void intel_cdclk_read_hw(struct intel_display *display);
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available);
+
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 671db6926e4c..1e97020e7304 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -24,12 +24,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsb.h"
#include "intel_vrr.h"
@@ -2013,7 +2013,7 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_dsb_wait_for_delayed_vblank(state, crtc_state->dsb_color);
intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
intel_dsb_interrupt(crtc_state->dsb_color);
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 112749f97c26..f401558ac14e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define for_each_combo_phy(__display, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 79bda1c6ca26..913d90a7a508 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -32,7 +32,7 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
-#include "i915_utils.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 0d628762dc85..9d2a23c96c61 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -85,8 +85,13 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!crtc->active)
return 0;
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ if (!vblank->max_vblank_count) {
+ /*
+  * On PREEMPT_RT we cannot take the vblank spinlock, since this
+  * function is called from tracepoints.
+  */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return (u32)drm_crtc_vblank_count(&crtc->base);
+ else
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ }
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
@@ -391,6 +396,9 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+ if (HAS_CASF(display))
+ drm_crtc_create_sharpness_strength_property(&crtc->base);
+
return 0;
fail:
@@ -749,3 +757,89 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
out:
intel_psr_unlock(new_crtc_state);
}
+
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.enable != new_crtc_state->hw.enable;
+}
+
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.active != new_crtc_state->hw.active;
+}
+
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_active_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(display) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+/* "Maximum Pipe Read Bandwidth" */
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index 8c14ff8b391e..07917e8a9ae3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -58,4 +58,15 @@ void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe);
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state);
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state);
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state);
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state);
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 0c7f91046996..c2a6217c2262 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -289,10 +289,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "scanline offset: %d\n",
intel_crtc_scanline_offset(pipe_config));
- drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->hw.adjusted_mode.crtc_vblank_start -
- pipe_config->hw.adjusted_mode.crtc_vdisplay,
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d, set context latency: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay,
+ pipe_config->set_context_latency);
drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
@@ -313,9 +312,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
- drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d, min cdclk %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
+ pipe_config->pixel_rate, pipe_config->min_cdclk);
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
@@ -373,6 +372,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_vdsc_state_dump(&p, 0, pipe_config);
+ drm_printf(&p, "sharpness strength: %d, sharpness tap size: %d, sharpness enable: %d\n",
+ pipe_config->hw.casf_params.strength,
+ pipe_config->hw.casf_params.win_size,
+ pipe_config->hw.casf_params.casf_enable);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d4d181f9dca5..7aa14348aa6d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -12,13 +12,13 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
@@ -662,7 +662,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (width != height)
+ if (DISPLAY_VER(display) < 14 && width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
base = plane_state->surf;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 801235a5bc0a..b3b506d0e040 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -16,16 +15,15 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
-#define MB_WRITE_COMMITTED true
-#define MB_WRITE_UNCOMMITTED false
-
#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
for ((__lane) = 0; (__lane) < 2; (__lane)++) \
for_each_if((__lane_mask) & BIT(__lane))
@@ -39,14 +37,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- /* PTL doesn't have a PHY connected to PORT B; as such,
- * there will never be a case where PTL uses PHY B.
- * WCL uses PORT A and B with the C10 PHY.
- * Reusing the condition for WCL and extending it for PORT B
- * should not cause any issues for PTL.
- */
- if (display->platform.pantherlake && phy < PHY_C)
- return true;
+ if (display->platform.pantherlake) {
+ if (display->platform.pantherlake_wildcatlake)
+ return phy <= PHY_B;
+ else
+ return phy == PHY_A;
+ }
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
@@ -130,8 +126,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -140,7 +136,7 @@ static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -161,8 +157,8 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
- int command, int lane, u32 *val)
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -273,8 +269,7 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder,
return 0;
}
-static u8 intel_cx0_read(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr)
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
@@ -361,8 +356,8 @@ static void __intel_cx0_write(struct intel_encoder *encoder,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 data, bool committed)
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
@@ -414,8 +409,8 @@ static void __intel_cx0_rmw(struct intel_encoder *encoder,
__intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
@@ -2105,6 +2100,9 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state);
+
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
@@ -2129,6 +2127,8 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
+
+ pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
}
static void intel_c10_pll_program(struct intel_display *display,
@@ -2587,20 +2587,6 @@ static bool is_dp2(u32 clock)
return false;
}
-static bool is_hdmi_frl(u32 clock)
-{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
-}
-
static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -2614,7 +2600,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
{
if (dp && is_dp2(clock))
return 2;
- else if (is_hdmi_frl(clock))
+ else if (intel_hdmi_is_frl(clock))
return 1;
else
return 0;
@@ -2626,11 +2612,13 @@ static void intel_c20_pll_program(struct intel_display *display,
bool is_dp, int port_clock)
{
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+ u8 serdes;
bool cntx;
int i;
/* 1. Read current context selection */
- cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) &
+ PHY_C20_CONTEXT_TOGGLE;
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2700,28 +2688,31 @@ static void intel_c20_pll_program(struct intel_display *display,
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
- if (is_dp) {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)),
- MB_WRITE_COMMITTED);
- } else {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(port_clock) ? BIT(7) : 0,
- MB_WRITE_COMMITTED);
+ serdes = 0;
+ if (is_dp)
+ serdes = PHY_C20_IS_DP |
+ PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock));
+ else if (intel_hdmi_is_frl(port_clock))
+ serdes = PHY_C20_IS_HDMI_FRL;
- intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(port_clock),
- MB_WRITE_COMMITTED);
- }
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL,
+ serdes,
+ MB_WRITE_COMMITTED);
+
+ if (!is_dp)
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ PHY_C20_HDMI_RATE_MASK,
+ intel_c20_get_hdmi_rate(port_clock),
+ MB_WRITE_COMMITTED);
/*
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
+ PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE,
+ MB_WRITE_COMMITTED);
}
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
@@ -2768,7 +2759,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (!is_dp && is_hdmi_frl(port_clock))
+ if (!is_dp && intel_hdmi_is_frl(port_clock))
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
@@ -2808,8 +2799,8 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
- u8 lane_mask, u8 state)
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2839,24 +2830,24 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
/* Update Timeout Value */
if (intel_de_wait_custom(display, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 2, NULL))
drm_warn(display->drm,
"PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
-static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
XELPDP_POWER_STATE_READY_MASK,
- XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+ XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
- XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+ XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) |
XELPDP_PLL_LANE_STAGGERING_DELAY(0));
}
@@ -2929,7 +2920,7 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_RESET);
+ XELPDP_P2_STATE_RESET);
intel_cx0_setup_powerdown(encoder);
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
@@ -3034,7 +3025,7 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder,
* TODO: For DP alt mode use only one lane.
*/
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_READY);
+ XELPDP_P2_STATE_READY);
/*
* 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
@@ -3160,8 +3151,8 @@ static int intel_mtl_tbt_clock_select(struct intel_display *display,
}
}
-static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3275,13 +3266,13 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
if (intel_encoder_is_c10phy(encoder))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
if ((display->platform.battlemage && encoder->port == PORT_A) ||
(DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
- return CX0_P4PG_STATE_DISABLE;
+ return XELPDP_P4PG_STATE_DISABLE;
}
static void intel_cx0pll_disable(struct intel_encoder *encoder)
@@ -3345,7 +3336,7 @@ static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder)
intel_cx0_get_pclk_pll_request(lane);
}
-static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3584,7 +3575,7 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- if (DISPLAY_VER(display) < 14)
+ if (!IS_DISPLAY_VER(display, 14, 30))
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c5a7b529955b..84d334b865f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define MB_WRITE_COMMITTED true
+#define MB_WRITE_UNCOMMITTED false
+
enum icl_port_dpll_id;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -19,6 +22,8 @@ struct intel_display;
struct intel_encoder;
struct intel_hdmi;
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
@@ -41,9 +46,25 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state);
+int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder);
+bool intel_cx0_is_hdmi_frl(u32 clock);
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr);
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed);
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed);
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val);
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 77eae1d845f7..635b35669348 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -50,6 +50,7 @@
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
#define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define XELPDP_PORT_P2P_TRANSACTION_PENDING REG_BIT(24)
#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16)
#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
@@ -104,6 +105,8 @@
#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1)
#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2)
#define XELPDP_PORT_REVERSAL REG_BIT(16)
+#define XE3PLPDP_PHY_MODE_MASK REG_GENMASK(15, 12)
+#define XE3PLPDP_PHY_MODE_DP REG_FIELD_PREP(XE3PLPDP_PHY_MODE_MASK, 0x3)
#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11)
#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7)
#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6)
@@ -124,6 +127,7 @@
_XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
+#define XE3PLPDP_LANE_PHY_PULSE_STATUS(lane) _PICK(lane, REG_BIT(27), REG_BIT(26))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20)
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
@@ -149,11 +153,12 @@
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
-#define CX0_P0_STATE_ACTIVE 0x0
-#define CX0_P2_STATE_READY 0x2
-#define CX0_P2PG_STATE_DISABLE 0x9
-#define CX0_P4PG_STATE_DISABLE 0xC
-#define CX0_P2_STATE_RESET 0x2
+#define XELPDP_P0_STATE_ACTIVE 0x0
+#define XELPDP_P2_STATE_READY 0x2
+#define XE3PLPD_P4_STATE_DISABLE 0x4
+#define XELPDP_P2PG_STATE_DISABLE 0x9
+#define XELPDP_P4PG_STATE_DISABLE 0xC
+#define XELPDP_P2_STATE_RESET 0x2
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_A 0x640d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
@@ -298,10 +303,14 @@
#define PHY_C20_RD_DATA_L 0xC08
#define PHY_C20_RD_DATA_H 0xC09
#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00
-#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_IS_HDMI_FRL REG_BIT8(7)
+#define PHY_C20_IS_DP REG_BIT8(6)
+#define PHY_C20_DP_RATE_MASK REG_GENMASK8(4, 1)
+#define PHY_C20_DP_RATE(val) REG_FIELD_PREP8(PHY_C20_DP_RATE_MASK, val)
#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0)
-#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1)
-#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val)
+#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_HDMI_RATE_MASK REG_GENMASK8(1, 0)
+#define PHY_C20_HDMI_RATE(val) REG_FIELD_PREP8(PHY_C20_HDMI_RATE_MASK, val)
#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02
#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val)
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.c b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
new file mode 100644
index 000000000000..8b8894c37f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_dbuf_bw.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_dbuf_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+};
+
+struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_dbuf_bw_state, base);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj);
+ if (IS_ERR(dbuf_bw_state))
+ return ERR_CAST(dbuf_bw_state);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dbuf_bw_state_changed(struct intel_display *display,
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state,
+ const struct intel_dbuf_bw_state *new_dbuf_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
+ &old_dbuf_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_dbuf_bw =
+ &new_dbuf_bw_state->dbuf_bw[pipe];
+
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
+
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
+ }
+}
+
+/* "Maximum Data Buffer Bandwidth" */
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe];
+
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
+ }
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
+ }
+
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
+
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL;
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
+ new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
+ if (IS_ERR(new_dbuf_bw_state))
+ return PTR_ERR(new_dbuf_bw_state);
+
+ old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state);
+
+ new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
+ }
+
+ if (!old_dbuf_bw_state)
+ return 0;
+
+ if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_cdclk_update_dbuf_bw_min_cdclk(state,
+ intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state),
+ intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state),
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void intel_dbuf_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state);
+ }
+}
+
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe]));
+}
+
+static struct intel_global_state *
+intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ return &state->base;
+}
+
+static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_bw_funcs = {
+ .atomic_duplicate_state = intel_dbuf_bw_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_bw_destroy_state,
+};
+
+int intel_dbuf_bw_init(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(display, &display->dbuf_bw.obj,
+ &state->base, &intel_dbuf_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.h b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
new file mode 100644
index 000000000000..61875b9d5969
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_DBUF_BW_H__
+#define __INTEL_DBUF_BW_H__
+
+#include <drm/drm_atomic.h>
+
+struct intel_atomic_state;
+struct intel_dbuf_bw_state;
+struct intel_crtc;
+struct intel_display;
+struct intel_global_state;
+
+struct intel_dbuf_bw_state *
+to_intel_dbuf_bw_state(struct intel_global_state *obj_state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state);
+
+int intel_dbuf_bw_init(struct intel_display *display);
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state);
+void intel_dbuf_bw_update_hw_state(struct intel_display *display);
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DBUF_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c09aa759f4d4..733ef4559131 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -35,7 +35,6 @@
#include <drm/drm_privacy_screen_consumer.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_alpm.h"
#include "intel_audio.h"
@@ -53,6 +52,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -72,6 +72,7 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lspcon.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
#include "intel_panel.h"
@@ -1466,10 +1467,15 @@ static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const u8 *signal_array;
+ size_t array_size;
int i;
- for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
- if (index_to_dp_signal_levels[i] == signal_levels)
+ signal_array = index_to_dp_signal_levels;
+ array_size = ARRAY_SIZE(index_to_dp_signal_levels);
+
+ for (i = 0; i < array_size; i++) {
+ if (signal_array[i] == signal_levels)
return i;
}
@@ -4240,6 +4246,19 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
&crtc_state->dpll_hw_state);
}
+static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
+
+ if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
+ crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
+ else
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -4559,6 +4578,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
+ int ret = 0;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = intel_dp_compute_config_late(encoder, crtc_state, conn_state);
+
+ if (ret)
+ return ret;
drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
@@ -5224,7 +5250,12 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->enable_clock = intel_xe3plpd_pll_enable;
+ encoder->disable_clock = intel_xe3plpd_pll_disable;
+ encoder->port_pll_type = intel_mtl_port_pll_type;
+ encoder->get_config = xe3plpd_ddi_get_config;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
@@ -5289,7 +5320,9 @@ void intel_ddi_init(struct intel_display *display,
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->set_signal_levels = intel_lt_phy_set_signal_levels;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
} else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index a238be5bc455..395dba8c9e4d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -3,13 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_lt_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1115,6 +1116,69 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
.num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
};
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_dp14[] = {
+ { .lt = { 1, 0, 0, 21, 0 } },
+ { .lt = { 1, 1, 0, 24, 3 } },
+ { .lt = { 1, 2, 0, 28, 7 } },
+ { .lt = { 0, 3, 0, 35, 13 } },
+ { .lt = { 1, 1, 0, 27, 0 } },
+ { .lt = { 1, 2, 0, 31, 4 } },
+ { .lt = { 0, 3, 0, 39, 9 } },
+ { .lt = { 1, 2, 0, 35, 0 } },
+ { .lt = { 0, 3, 0, 41, 7 } },
+ { .lt = { 0, 3, 0, 48, 0 } },
+};
+
+/* DP2.1 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_uhbr[] = {
+ { .lt = { 0, 0, 0, 48, 0 } },
+ { .lt = { 0, 0, 0, 43, 5 } },
+ { .lt = { 0, 0, 0, 40, 8 } },
+ { .lt = { 0, 0, 0, 37, 11 } },
+ { .lt = { 0, 0, 0, 33, 15 } },
+ { .lt = { 0, 0, 2, 46, 0 } },
+ { .lt = { 0, 0, 2, 42, 4 } },
+ { .lt = { 0, 0, 2, 38, 8 } },
+ { .lt = { 0, 0, 2, 35, 11 } },
+ { .lt = { 0, 0, 2, 33, 13 } },
+ { .lt = { 0, 0, 4, 44, 0 } },
+ { .lt = { 0, 0, 4, 40, 4 } },
+ { .lt = { 0, 0, 4, 37, 7 } },
+ { .lt = { 0, 0, 4, 33, 11 } },
+ { .lt = { 0, 0, 8, 40, 0 } },
+ { .lt = { 1, 0, 2, 26, 2 } },
+};
+
+/* eDP */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_edp[] = {
+ { .lt = { 1, 0, 0, 12, 0 } },
+ { .lt = { 1, 1, 0, 13, 1 } },
+ { .lt = { 1, 2, 0, 15, 3 } },
+ { .lt = { 1, 3, 0, 19, 7 } },
+ { .lt = { 1, 1, 0, 14, 0 } },
+ { .lt = { 1, 2, 0, 16, 2 } },
+ { .lt = { 1, 3, 0, 21, 5 } },
+ { .lt = { 1, 2, 0, 18, 0 } },
+ { .lt = { 1, 3, 0, 22, 4 } },
+ { .lt = { 1, 3, 0, 26, 0 } },
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_dp14 = {
+ .entries = _xe3plpd_lt_trans_dp14,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_dp14),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_uhbr = {
+ .entries = _xe3plpd_lt_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_uhbr),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_edp = {
+ .entries = _xe3plpd_lt_trans_edp,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_edp),
+};
+
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1707,11 +1771,26 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
+static const struct intel_ddi_buf_trans *
+xe3plpd_get_lt_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_uhbr, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&xe3plpd_lt_trans_dp14, n_entries);
+}
+
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->get_buf_trans = xe3plpd_get_lt_buf_trans;
+ } else if (DISPLAY_VER(display) >= 14) {
if (intel_encoder_is_c10phy(encoder))
encoder->get_buf_trans = mtl_get_c10_buf_trans;
else
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 29a190390192..cec332090a20 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -50,6 +50,14 @@ struct dg2_snps_phy_buf_trans {
u8 post_cursor;
};
+struct xe3plpd_lt_phy_buf_trans {
+ u8 txswing;
+ u8 txswing_level;
+ u8 pre_cursor;
+ u8 main_cursor;
+ u8 post_cursor;
+};
+
union intel_ddi_buf_trans_entry {
struct hsw_ddi_buf_trans hsw;
struct bxt_ddi_buf_trans bxt;
@@ -57,6 +65,7 @@ union intel_ddi_buf_trans_entry {
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
struct dg2_snps_phy_buf_trans snps;
+ struct xe3plpd_lt_phy_buf_trans lt;
};
struct intel_ddi_buf_trans {
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 784fd90f300b..42ec78798666 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -52,7 +52,6 @@
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
@@ -61,6 +60,7 @@
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
+#include "intel_casf.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
@@ -77,6 +77,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -100,6 +101,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
@@ -130,11 +132,9 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
-#include "vlv_sideband.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -142,65 +142,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
-/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_device *drm)
-{
- int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
- /* Obtain SKU information */
- hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
- CCK_FUSE_HPLL_FREQ_MASK;
-
- return vco_freq[hpll_freq] * 1000;
-}
-
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq)
-{
- u32 val;
- int divider;
-
- val = vlv_cck_read(drm, reg);
- divider = val & CCK_FREQUENCY_VALUES;
-
- drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
-
- return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
-}
-
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg)
-{
- struct drm_i915_private *dev_priv = to_i915(drm);
- int hpll;
-
- vlv_cck_get(drm);
-
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
-
- hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
-
- vlv_cck_put(drm);
-
- return hpll;
-}
-
-void intel_update_czclk(struct intel_display *display)
-{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (!display->platform.valleyview && !display->platform.cherryview)
- return;
-
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
- CCK_CZ_CLOCK_CONTROL);
-
- drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
-}
-
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
return (crtc_state->active_planes &
@@ -892,9 +833,8 @@ static void intel_async_flip_vtd_wa(struct intel_display *display,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
+ return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
(DISPLAY_VER(display) == 9 || display->platform.broadwell ||
display->platform.haswell);
}
@@ -1041,6 +981,24 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}
+static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
+static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
#undef is_disabling
#undef is_enabling
@@ -1196,6 +1154,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (audio_disabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_disable(state, crtc);
+ if (intel_casf_disabling(old_crtc_state, new_crtc_state))
+ intel_casf_disable(new_crtc_state);
+
intel_drrs_deactivate(old_crtc_state);
if (hsw_ips_pre_update(state, crtc))
@@ -1643,8 +1604,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(display))
- intel_vrr_set_transcoder_timings(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
@@ -2423,39 +2383,44 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- int vblank_delay = 0;
+ int set_context_latency = 0;
if (!HAS_DSB(display))
return 0;
- vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
+ set_context_latency = max(set_context_latency,
+ intel_psr_min_set_context_latency(crtc_state));
- return vblank_delay;
+ return set_context_latency;
}
-static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- int vblank_delay, max_vblank_delay;
+ int set_context_latency, max_vblank_delay;
+
+ set_context_latency = intel_crtc_set_context_latency(crtc_state);
- vblank_delay = intel_crtc_vblank_delay(crtc_state);
max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
- if (vblank_delay > max_vblank_delay) {
- drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
- crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay);
+ if (set_context_latency > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name,
+ set_context_latency,
+ max_vblank_delay);
return -EINVAL;
}
- adjusted_mode->crtc_vblank_start += vblank_delay;
+ crtc_state->set_context_latency = set_context_latency;
+ adjusted_mode->crtc_vblank_start += set_context_latency;
return 0;
}
@@ -2467,11 +2432,11 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- ret = intel_crtc_compute_vblank_delay(state, crtc);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
return ret;
- ret = intel_dpll_crtc_compute_clock(state, crtc);
+ ret = intel_crtc_compute_set_context_latency(state, crtc);
if (ret)
return ret;
@@ -2488,6 +2453,8 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
if (crtc_state->has_pch_encoder)
return ilk_fdi_compute_config(crtc, crtc_state);
+ intel_vrr_compute_guardband(crtc_state);
+
return 0;
}
@@ -2679,13 +2646,16 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
if (DISPLAY_VER(display) >= 4)
@@ -2769,13 +2739,16 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
/*
@@ -2882,11 +2855,24 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder))
- adjusted_mode->crtc_vblank_start =
- adjusted_mode->crtc_vdisplay +
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
+ pipe_config->set_context_latency =
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ pipe_config->set_context_latency;
+ } else if (DISPLAY_VER(display) == 12) {
+ /*
+ * TGL doesn't have a dedicated register for SCL.
+ * Instead, the hardware derives SCL from the difference between
+ * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
+ * To reflect the HW behaviour, read out the value for SCL as
+ * Vblank start - Vactive.
+ */
+ pipe_config->set_context_latency =
+ adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ }
if (DISPLAY_VER(display) >= 30)
pipe_config->min_hblank = intel_de_read(display,
@@ -3953,6 +3939,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_joiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
+ /* intel_vrr_get_config() depends on .framestart_delay */
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
+ /*
+ * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
+ * readout done by intel_get_transcoder_timings().
+ */
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
@@ -4004,15 +4004,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
-
- pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
- } else {
- /* no idea if this is correct */
- pipe_config->framestart_delay = 1;
- }
-
out:
intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);
@@ -4259,9 +4250,14 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
+ ret = intel_casf_compute_config(crtc_state);
+ if (ret)
+ return ret;
+
if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
- intel_crtc_needs_fastset(crtc_state)) {
+ intel_crtc_needs_fastset(crtc_state) ||
+ intel_casf_needs_scaler(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
return ret;
@@ -4640,7 +4636,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
+ crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
@@ -4761,8 +4757,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_vrr_compute_config_late(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4997,9 +4991,33 @@ static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_s
* Allow fastboot to fix up vblank delay (handled via LRR
* codepaths), a bit dodgy as the registers aren't
* double buffered but seems to be working more or less...
+ *
+ * Also allow this when the VRR timing generator is always on,
+ * and optimized guardband is used. In such cases,
+ * vblank delay may vary even without inherited state, but it's
+ * still safe as VRR guardband is still same.
*/
- return HAS_LRR(display) && old_crtc_state->inherited &&
- !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+ return HAS_LRR(display) &&
+ (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
+static void
+pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
+ const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ char *chipname = "LTPHY";
+
+ pipe_config_mismatch(p, fastset, crtc, name, chipname);
+
+ drm_printf(p, "expected:\n");
+ intel_lt_phy_dump_hw_state(display, a);
+ drm_printf(p, "found:\n");
+ intel_lt_phy_dump_hw_state(display, b);
}
bool
@@ -5126,6 +5144,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_PLL_LT(name) do { \
+ if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->name, \
+ &pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
@@ -5320,6 +5348,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_I(pixel_rate);
+ PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable);
+ PIPE_CONF_CHECK_I(hw.casf_params.win_size);
+ PIPE_CONF_CHECK_I(hw.casf_params.strength);
PIPE_CONF_CHECK_X(gamma_mode);
if (display->platform.cherryview)
@@ -5350,7 +5381,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
+ else if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
@@ -5444,6 +5477,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.guardband);
}
+ PIPE_CONF_CHECK_I(set_context_latency);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -5690,6 +5725,23 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.enable)
+ enabled_pipes |= BIT(crtc->pipe);
+ else
+ enabled_pipes &= ~BIT(crtc->pipe);
+ }
+
+ return enabled_pipes;
+}
+
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes)
{
@@ -5719,12 +5771,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
- const struct drm_display_mode *new_adjusted_mode)
+static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
+ const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
+ const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+
return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
- old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
+ old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
}
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
@@ -5750,8 +5806,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
- &new_crtc_state->hw.adjusted_mode))
+ if (!lrr_params_changed(old_crtc_state, new_crtc_state))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -6343,7 +6398,6 @@ int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = false;
if (!intel_display_driver_check_access(display))
return -ENODEV;
@@ -6451,14 +6505,11 @@ int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- any_ms = true;
-
intel_dpll_release(state, crtc);
}
- if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(display->drm,
- "rejecting conflicting digital port configuration\n");
+ if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) {
+ drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -6467,29 +6518,25 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state);
+
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state, any_ms);
+ ret = intel_bw_atomic_check(state);
if (ret)
goto fail;
- ret = intel_cdclk_atomic_check(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state);
if (ret)
goto fail;
- if (intel_any_crtc_needs_modeset(state))
- any_ms = true;
-
- if (any_ms) {
+ if (intel_any_crtc_needs_modeset(state)) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
-
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
}
ret = intel_pmdemand_atomic_check(state);
@@ -6740,6 +6787,11 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
intel_vrr_set_transcoder_timings(new_crtc_state);
}
+ if (intel_casf_enabling(new_crtc_state, old_crtc_state))
+ intel_casf_enable(new_crtc_state);
+ else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
+ intel_casf_update_strength(new_crtc_state);
+
intel_fbc_update(state, crtc);
drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));
@@ -7308,7 +7360,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
new_crtc_state);
intel_dsb_interrupt(new_crtc_state->dsb_commit);
@@ -7398,13 +7450,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
intel_pmdemand_pre_plane_update(state);
- if (state->modeset) {
+ if (state->modeset)
drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
- intel_set_cdclk_pre_plane_update(state);
+ intel_set_cdclk_pre_plane_update(state);
+ if (state->modeset)
intel_modeset_verify_disabled(state);
- }
intel_sagv_pre_plane_update(state);
@@ -7517,8 +7569,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -8004,6 +8055,14 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ /*
+ * WM_LINETIME only goes up to (almost) 64 usec, and also
+ * knowing that the linetime is always bounded will ease the
+ * mind during various calculations.
+ */
+ if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
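/*
 * Editor's illustration (not part of the patch, hypothetical numbers): the
 * linetime bound added above works out as htotal * 1000 / clock_kHz in usec.
 * For a mode with htotal = 4400 and clock = 148500 kHz:
 *
 *   DIV_ROUND_UP(4400 * 1000, 148500) = 30 usec  -> passes the check
 *
 * while a hypothetical mode with htotal = 4096 at a 25200 kHz dot clock gives
 * DIV_ROUND_UP(4096 * 1000, 25200) = 163 usec and would be rejected as
 * MODE_H_ILLEGAL, keeping WM_LINETIME (which saturates near 64 usec) valid.
 */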
@@ -8328,7 +8387,5 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 37e2ab301a80..fc2ef92ccf68 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -394,6 +394,8 @@ enum phy_fia {
i)
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -435,11 +437,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_device *drm);
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
@@ -528,7 +525,6 @@ void intel_init_display_hooks(struct intel_display *display);
void intel_setup_outputs(struct intel_display *display);
int intel_initial_commit(struct intel_display *display);
void intel_panel_sanitize_ssc(struct intel_display *display);
-void intel_update_czclk(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index d56065f22655..9a47aa38cf82 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include "i915_drv.h"
-#include "intel_display_conversion.h"
+#include <drm/intel/display_member.h>
-static struct intel_display *__i915_to_display(struct drm_i915_private *i915)
-{
- return i915->display;
-}
+#include "intel_display_conversion.h"
struct intel_display *__drm_to_display(struct drm_device *drm)
{
- return __i915_to_display(to_i915(drm));
+ /*
+ * Note: This relies on both struct drm_i915_private and struct
+ * xe_device having the struct drm_device and struct intel_display *
+ * members at the same relative offsets, as defined by struct
+ * __intel_generic_device.
+ *
+ * See also INTEL_DISPLAY_MEMBER_STATIC_ASSERT().
+ */
+ struct __intel_generic_device *d = container_of(drm, struct __intel_generic_device, drm);
+
+ return d->display;
}
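/*
 * Editor's sketch (an assumption; the real definitions live in
 * <drm/intel/display_member.h>, which is not part of this hunk): the
 * container_of() above only works if both parent drivers lay out their
 * device structs like this template:
 *
 *   struct __intel_generic_device {
 *           struct drm_device drm;
 *           struct intel_display *display;
 *   };
 *
 * with each parent struct asserting the matching member offsets, e.g.
 * something along the lines of:
 *
 *   #define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
 *           static_assert(offsetof(type, drm_member) == \
 *                         offsetof(struct __intel_generic_device, drm) && \
 *                         offsetof(type, display_member) == \
 *                         offsetof(struct __intel_generic_device, display))
 */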
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 8c226406c5cd..893279be8409 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -41,6 +41,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display_parent_interface;
struct intel_dmc;
struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
@@ -291,6 +292,9 @@ struct intel_display {
/* Intel PCH: where the south display engine lives */
enum intel_pch pch_type;
+ /* Parent, or core, driver functions exposed to display */
+ const struct intel_display_parent_interface *parent;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -370,6 +374,10 @@ struct intel_display {
} dbuf;
struct {
+ struct intel_global_obj obj;
+ } dbuf_bw;
+
+ struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -475,7 +483,21 @@ struct intel_display {
struct work_struct vblank_notify_work;
- u32 de_irq_mask[I915_MAX_PIPES];
+ /*
+ * Cached value of VLV/CHV IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 vlv_imr_mask;
+ /*
+ * Cached value of gen 5-7 DE IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 ilk_de_imr_mask;
+ /*
+ * Cached value of BDW+ DE pipe IMR to avoid reads in updating
+ * the bitfield.
+ */
+ u32 de_pipe_imr_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
@@ -568,6 +590,11 @@ struct intel_display {
} state;
struct {
+ unsigned int hpll_freq;
+ unsigned int czclk_freq;
+ } vlv_clock;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 8345faa38f04..0b3fd65dac0f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -48,6 +48,7 @@
#include "intel_psr_regs.h"
#include "intel_vdsc.h"
#include "intel_wm.h"
+#include "intel_tc.h"
static struct intel_display *node_to_intel_display(struct drm_info_node *node)
{
@@ -247,6 +248,8 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *mode;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_digital_port *dig_port = NULL;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
@@ -269,14 +272,19 @@ static void intel_connector_info(struct seq_file *m,
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
+ dig_port = dp_to_dig_port(intel_attached_dp(intel_connector));
break;
case DRM_MODE_CONNECTOR_HDMIA:
intel_hdmi_info(m, intel_connector);
+ dig_port = hdmi_to_dig_port(intel_attached_hdmi(intel_connector));
break;
default:
break;
}
+ if (dig_port != NULL && intel_encoder_is_tc(&dig_port->base))
+ intel_tc_info(&p, dig_port);
+
intel_hdcp_info(m, intel_connector);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index a002bc6ce7b0..328447a5e5e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};
+static const u16 wcl_ids[] = {
+ INTEL_WCL_IDS(ID),
+ 0
+};
+
static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(pantherlake, wildcatlake),
+ .pciidlist = wcl_ids,
+ },
+ {},
+ }
};
__diag_pop();
@@ -1482,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
@@ -1634,7 +1647,8 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent)
{
struct intel_display *display;
const struct intel_display_device_info *info;
@@ -1650,6 +1664,8 @@ struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
+ display->parent = parent;
+
intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index f329f1beafef..b559ef43d547 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -13,6 +13,7 @@
struct drm_printer;
struct intel_display;
+struct intel_display_parent_interface;
struct pci_dev;
/*
@@ -101,7 +102,9 @@ struct pci_dev;
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
- func(pantherlake)
+ func(pantherlake) \
+ func(pantherlake_wildcatlake)
+
#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
@@ -140,10 +143,13 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
+#define HAS_128B_Y_TILING(__display) (!(__display)->platform.i915g && !(__display)->platform.i915gm)
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
@@ -155,7 +161,7 @@ struct intel_display_platforms {
#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
-#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell)
#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
@@ -308,7 +314,8 @@ struct intel_display_device_info {
bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent);
void intel_display_device_remove(struct intel_display *display);
void intel_display_device_info_runtime_init(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index add044fdb347..63942ebf46fb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -19,7 +19,7 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_utils.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -29,12 +29,14 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
@@ -286,6 +288,10 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_wq_unordered;
+ ret = intel_dbuf_bw_init(display);
+ if (ret)
+ goto cleanup_wq_unordered;
+
ret = intel_bw_init(display);
if (ret)
goto cleanup_wq_unordered;
@@ -483,7 +489,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
- intel_update_czclk(display);
intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 89622a6e97f4..43b27deb4a26 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -141,14 +141,14 @@ void ilk_update_display_irq(struct intel_display *display,
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- new_val = dev_priv->irq_mask;
+ new_val = display->irq.ilk_de_imr_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->irq_mask &&
+ if (new_val != display->irq.ilk_de_imr_mask &&
!drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
- dev_priv->irq_mask = new_val;
- intel_de_write(display, DEIMR, dev_priv->irq_mask);
+ display->irq.ilk_de_imr_mask = new_val;
+ intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
intel_de_posting_read(display, DEIMR);
}
}
@@ -216,13 +216,13 @@ static void bdw_update_pipe_irq(struct intel_display *display,
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = display->irq.de_irq_mask[pipe];
+ new_val = display->irq.de_pipe_imr_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != display->irq.de_irq_mask[pipe]) {
- display->irq.de_irq_mask[pipe] = new_val;
- intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
+ if (new_val != display->irq.de_pipe_imr_mask[pipe]) {
+ display->irq.de_pipe_imr_mask[pipe] = new_val;
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -873,7 +873,7 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
@@ -924,7 +924,7 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
@@ -973,6 +973,53 @@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
}
}
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier)
+{
+ /* disable master interrupt before clearing iir */
+ *de_ier = intel_de_read_fw(display, DEIER);
+ intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ /*
+ * Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be able
+ * to process them after we restore SDEIER (as soon as we restore it,
+ * we'll get an interrupt if SDEIIR still has something to process due
+ * to its back queue).
+ */
+ if (!HAS_PCH_NOP(display)) {
+ *sde_ier = intel_de_read_fw(display, SDEIER);
+ intel_de_write_fw(display, SDEIER, 0);
+ } else {
+ *sde_ier = 0;
+ }
+}
+
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier)
+{
+ intel_de_write_fw(display, DEIER, de_ier);
+
+ if (sde_ier)
+ intel_de_write_fw(display, SDEIER, sde_ier);
+}
+
+bool ilk_display_irq_handler(struct intel_display *display)
+{
+ u32 de_iir;
+ bool handled = false;
+
+ de_iir = intel_de_read_fw(display, DEIIR);
+ if (de_iir) {
+ intel_de_write_fw(display, DEIIR, de_iir);
+ if (DISPLAY_VER(display) >= 7)
+ _ivb_display_irq_handler(display, de_iir);
+ else
+ _ilk_display_irq_handler(display, de_iir);
+ handled = true;
+ }
+
+ return handled;
+}
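/*
 * Editor's sketch (hypothetical caller, assuming the usual top-level IRQ
 * handler shape in the i915 core): the three helpers above are meant to be
 * used as a disable / handle / re-enable bracket around DE IIR processing,
 * matching the SDEIER back-queue trick described in the comment above:
 *
 *   static irqreturn_t ilk_irq_handler_sketch(struct intel_display *display)
 *   {
 *           u32 de_ier, sde_ier;
 *           bool handled;
 *
 *           ilk_display_irq_master_disable(display, &de_ier, &sde_ier);
 *           handled = ilk_display_irq_handler(display);
 *           ilk_display_irq_master_enable(display, de_ier, sde_ier);
 *
 *           return handled ? IRQ_HANDLED : IRQ_NONE;
 *   }
 */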
+
static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
@@ -1866,8 +1913,6 @@ void vlv_display_error_irq_handler(struct intel_display *display,
static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -1882,7 +1927,7 @@ static void _vlv_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ display->irq.vlv_imr_mask = ~0u;
}
void vlv_display_irq_reset(struct intel_display *display)
@@ -1903,6 +1948,22 @@ void i9xx_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
}
+u32 i9xx_display_irq_enable_mask(struct intel_display *display)
+{
+ u32 enable_mask;
+
+ enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ if (DISPLAY_VER(display) >= 3)
+ enable_mask |= I915_ASLE_INTERRUPT;
+
+ if (HAS_HOTPLUG(display))
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+
+ return enable_mask;
+}
+
void i915_display_irq_postinstall(struct intel_display *display)
{
/*
@@ -1940,7 +2001,6 @@ static u32 vlv_error_mask(void)
static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -1974,11 +2034,11 @@ static void _vlv_display_irq_postinstall(struct intel_display *display)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);
- dev_priv->irq_mask = ~enable_mask;
+ display->irq.vlv_imr_mask = ~enable_mask;
- intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}
void vlv_display_irq_postinstall(struct intel_display *display)
@@ -1989,7 +2049,7 @@ void vlv_display_irq_postinstall(struct intel_display *display)
spin_unlock_irq(&display->irq.lock);
}
-void ibx_display_irq_reset(struct intel_display *display)
+static void ibx_display_irq_reset(struct intel_display *display)
{
if (HAS_PCH_NOP(display))
return;
@@ -2000,6 +2060,24 @@ void ibx_display_irq_reset(struct intel_display *display)
intel_de_write(display, SERR_INT, 0xffffffff);
}
+void ilk_display_irq_reset(struct intel_display *display)
+{
+ struct intel_uncore *uncore = to_intel_uncore(display->drm);
+
+ gen2_irq_reset(uncore, DE_IRQ_REGS);
+ display->irq.ilk_de_imr_mask = ~0u;
+
+ if (DISPLAY_VER(display) == 7)
+ intel_de_write(display, GEN7_ERR_INT, 0xffffffff);
+
+ if (display->platform.haswell) {
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
+ }
+
+ ibx_display_irq_reset(display);
+}
+
void gen8_display_irq_reset(struct intel_display *display)
{
enum pipe pipe;
@@ -2089,8 +2167,8 @@ void gen8_irq_power_well_post_enable(struct intel_display *display,
for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
- ~display->irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_pipe_imr_mask[pipe],
+ ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
spin_unlock_irq(&display->irq.lock);
}
@@ -2184,8 +2262,6 @@ out:
void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
u32 display_mask, extra_mask;
if (DISPLAY_VER(display) >= 7) {
@@ -2217,11 +2293,11 @@ void ilk_de_irq_postinstall(struct intel_display *display)
if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
- i915->irq_mask = ~display_mask;
+ display->irq.ilk_de_imr_mask = ~display_mask;
ibx_irq_postinstall(display);
- intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
display_mask | extra_mask);
}
@@ -2306,12 +2382,12 @@ void gen8_de_irq_postinstall(struct intel_display *display)
}
for_each_pipe(display, pipe) {
- display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
+ display->irq.de_pipe_imr_mask[pipe],
de_pipe_enables);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index c66db3851da4..84acd31948cf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -47,8 +47,9 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier);
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier);
+bool ilk_display_irq_handler(struct intel_display *display);
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
void gen11_display_irq_handler(struct intel_display *display);
@@ -56,11 +57,12 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
void i9xx_display_irq_reset(struct intel_display *display);
-void ibx_display_irq_reset(struct intel_display *display);
+void ilk_display_irq_reset(struct intel_display *display);
void vlv_display_irq_reset(struct intel_display *display);
void gen8_display_irq_reset(struct intel_display *display);
void gen11_display_irq_reset(struct intel_display *display);
+u32 i9xx_display_irq_enable_mask(struct intel_display *display);
void i915_display_irq_postinstall(struct intel_display *display);
void i965_display_irq_postinstall(struct intel_display *display);
void vlv_display_irq_postinstall(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_jiffies.h b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
new file mode 100644
index 000000000000..c060c567e262
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_JIFFIES_H__
+#define __INTEL_DISPLAY_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
+#endif /* __INTEL_DISPLAY_JIFFIES_H__ */
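/*
 * Editor's usage sketch (hypothetical caller and field names): record the
 * timestamp when event A happens, then enforce the minimum spacing just
 * before event B, sleeping only for whatever part of the delay remains.
 *
 *   unsigned long panel_power_off_time;        // assumed bookkeeping field
 *
 *   // event A: panel power was switched off
 *   panel_power_off_time = jiffies;
 *   ...
 *   // event B: about to power the panel back on; honour e.g. a 500 ms
 *   // power cycle delay without waiting again if it already elapsed
 *   wait_remaining_ms_from_jiffies(panel_power_off_time, 500);
 */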
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c4b919f44556..fbfa823b6dce 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -13,7 +13,6 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -25,6 +24,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 39b71fffa2cd..9b49952994ce 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1516,7 +1516,11 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc xelpdp_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1534,6 +1538,7 @@ static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
I915_DECL_PW_DOMAINS(xe2lpd_pwdoms_pica_tc,
@@ -1584,6 +1589,7 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
/*
@@ -1677,16 +1683,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
- .instances = &I915_PW_INSTANCES(
- I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
- I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
- I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
- I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
- I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
- I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
- ),
- .ops = &xelpdp_aux_power_well_ops,
},
};
@@ -1715,6 +1711,7 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xe3lpd_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
static const struct i915_power_well_desc wcl_power_wells_main[] = {
@@ -1751,7 +1748,11 @@ static const struct i915_power_well_desc wcl_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_C),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc wcl_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1768,6 +1769,7 @@ static const struct i915_power_well_desc_list wcl_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(wcl_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(wcl_power_wells_aux),
};
static void init_power_well_domains(const struct i915_power_well_instance *inst,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index d1b70a117d73..eab7019f2252 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1866,18 +1866,36 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
* expected to just wait a fixed 600us after raising the request
* bit.
*/
- usleep_range(600, 1200);
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_set(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel power to be up\n",
+ phy_name(phy));
+ } else {
+ usleep_range(600, 1200);
+ }
}
static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
- usleep_range(10, 30);
+
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_clear(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel to powerdown\n",
+ phy_name(phy));
+ } else {
+ usleep_range(10, 30);
+ }
}
static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
index 56c4024201c1..0a331f89b4db 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rpm.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -1,69 +1,62 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
-#include "i915_drv.h"
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
-#include "intel_runtime_pm.h"
-
-static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return &i915->runtime_pm;
-}
struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
{
- return intel_runtime_pm_get_raw(display_to_rpm(display));
+ return display->parent->rpm->get_raw(display->drm);
}
void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+ display->parent->rpm->put_raw(display->drm, wakeref);
}
struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
{
- return intel_runtime_pm_get(display_to_rpm(display));
+ return display->parent->rpm->get(display->drm);
}
struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
{
- return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+ return display->parent->rpm->get_if_in_use(display->drm);
}
struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
{
- return intel_runtime_pm_get_noresume(display_to_rpm(display));
+ return display->parent->rpm->get_noresume(display->drm);
}
void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put(display_to_rpm(display), wakeref);
+ display->parent->rpm->put(display->drm, wakeref);
}
void intel_display_rpm_put_unchecked(struct intel_display *display)
{
- intel_runtime_pm_put_unchecked(display_to_rpm(display));
+ display->parent->rpm->put_unchecked(display->drm);
}
bool intel_display_rpm_suspended(struct intel_display *display)
{
- return intel_runtime_pm_suspended(display_to_rpm(display));
+ return display->parent->rpm->suspended(display->drm);
}
void assert_display_rpm_held(struct intel_display *display)
{
- assert_rpm_wakelock_held(display_to_rpm(display));
+ display->parent->rpm->assert_held(display->drm);
}
void intel_display_rpm_assert_block(struct intel_display *display)
{
- disable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_block(display->drm);
}
void intel_display_rpm_assert_unblock(struct intel_display *display)
{
- enable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_unblock(display->drm);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 358ab922d7a7..00600134bda0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -551,7 +551,16 @@ struct intel_connector {
u8 fec_capability;
u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_throughput_quirk:1;
u8 dsc_decompression_enabled:1;
+
+ struct {
+ struct {
+ int rgb_yuv444;
+ int yuv422_420;
+ } overall_throughput;
+ int max_line_width;
+ } dsc_branch_caps;
} dp;
struct {
@@ -946,6 +955,26 @@ struct intel_csc_matrix {
u16 postoff[3];
};
+enum intel_panel_replay_dsc_support {
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED,
+ INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY,
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE,
+};
+
+struct scaler_filter_coeff {
+ u16 sign;
+ u16 exp;
+ u16 mantissa;
+};
+
+struct intel_casf {
+ #define SCALER_FILTER_NUM_TAPS 7
+ struct scaler_filter_coeff coeff[SCALER_FILTER_NUM_TAPS];
+ u8 strength;
+ u8 win_size;
+ bool casf_enable;
+};
+
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -982,6 +1011,7 @@ struct intel_crtc_state {
struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
struct drm_display_mode mode, pipe_mode, adjusted_mode;
enum drm_scaling_filter scaling_filter;
+ struct intel_casf casf_params;
} hw;
/* actual state of LUTs */
@@ -1124,9 +1154,12 @@ struct intel_crtc_state {
bool has_panel_replay;
bool wm_level_disabled;
bool pkg_c_latency_used;
+ /* Only used for state verification. */
+ enum intel_panel_replay_dsc_support panel_replay_dsc_support;
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
+ const char *no_psr_reason;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1183,7 +1216,9 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
- int min_cdclk[I915_MAX_PLANES];
+ int min_cdclk;
+
+ int plane_min_cdclk[I915_MAX_PLANES];
/* for packed/planar CbCr */
u32 data_rate[I915_MAX_PLANES];
@@ -1268,6 +1303,8 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
+ /* Only used for state computation, not read out from the HW. */
+ bool compression_enabled_on_link;
bool compression_enable;
int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
@@ -1341,6 +1378,20 @@ struct intel_crtc_state {
/* LOBF flag */
bool has_lobf;
+
+ /* W2 window or 'set context latency' lines */
+ u16 set_context_latency;
+
+ struct {
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
+ /* LNL and beyond */
+ u8 check_entry_lines;
+ u8 aux_less_wake_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
+ } alpm_state;
};
enum intel_pipe_crc_source {
@@ -1679,16 +1730,22 @@ struct intel_psr {
bool source_panel_replay_support;
bool sink_panel_replay_support;
bool sink_panel_replay_su_support;
+ enum intel_panel_replay_dsc_support sink_panel_replay_dsc_support;
bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
u8 entry_setup_frames;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
bool link_ok;
bool pkg_c_latency_used;
u8 active_non_psr_pipes;
+
+ const char *no_psr_reason;
};
struct intel_dp {
@@ -1844,19 +1901,12 @@ struct intel_dp {
bool colorimetry_support;
struct {
- u8 io_wake_lines;
- u8 fast_wake_lines;
enum transcoder transcoder;
struct mutex lock;
- /* LNL and beyond */
- u8 check_entry_lines;
- u8 aux_less_wake_lines;
- u8 silence_period_sym_clocks;
- u8 lfps_half_cycle_num_of_syms;
bool lobf_disable_debug;
bool sink_alpm_error;
- } alpm_parameters;
+ } alpm;
u8 alpm_dpcd;
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.c b/drivers/gpu/drm/i915/display/intel_display_utils.c
new file mode 100644
index 000000000000..04d010f7c23e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/device.h>
+
+#include <drm/drm_device.h>
+
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
+#include "intel_display_core.h"
+#include "intel_display_utils.h"
+
+bool intel_display_run_as_guest(struct intel_display *display)
+{
+#if IS_ENABLED(CONFIG_X86)
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+#else
+ /* Not supported yet */
+ return false;
+#endif
+}
+
+bool intel_display_vtd_active(struct intel_display *display)
+{
+ if (device_iommu_mapped(display->drm->dev))
+ return true;
+
+ /* Running as a guest, we assume the host is enforcing VT-d */
+ return intel_display_run_as_guest(display);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.h b/drivers/gpu/drm/i915/display/intel_display_utils.h
new file mode 100644
index 000000000000..2a18f160320c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_UTILS__
+#define __INTEL_DISPLAY_UTILS__
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+struct intel_display;
+
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+#ifndef fetch_and_zero
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
+#endif
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+bool intel_display_run_as_guest(struct intel_display *display);
+bool intel_display_vtd_active(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_UTILS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index 31cd2c9cd488..c528aaa679ca 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -67,6 +67,8 @@ bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa,
return intel_display_needs_wa_16025573575(display);
case INTEL_DISPLAY_WA_14011503117:
return DISPLAY_VER(display) == 13;
+ case INTEL_DISPLAY_WA_22014263786:
+ return IS_DISPLAY_VERx100(display, 1100, 1400);
default:
drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
break;
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index abc1df83f066..3644e8e2b724 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -25,6 +25,7 @@ enum intel_display_wa {
INTEL_DISPLAY_WA_16023588340,
INTEL_DISPLAY_WA_16025573575,
INTEL_DISPLAY_WA_14011503117,
+ INTEL_DISPLAY_WA_22014263786,
};
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 77a0199f9ea5..0bddb20a7c86 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -30,13 +30,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_flipq.h"
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
{
const char *fw_path = NULL;
u32 max_fw_size = 0;
-
- if (DISPLAY_VERx100(display) == 3002 ||
- DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002) {
+ fw_path = XE3LPD_3002_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -509,10 +513,16 @@ static u32 pipedmc_interrupt_mask(struct intel_display *display)
PIPEDMC_ATS_FAULT;
}
-static u32 dmc_evt_ctl_disable(void)
+static u32 dmc_evt_ctl_disable(u32 dmc_evt_ctl)
{
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ /*
+ * DMC_EVT_CTL_ENABLE cannot be cleared once set. Always
+ * configure it based on the original event definition to
+ * avoid mismatches in assert_dmc_loaded().
+ */
+ return (dmc_evt_ctl & DMC_EVT_CTL_ENABLE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVENT_FALSE);
}
@@ -546,6 +556,51 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
+static bool fixup_dmc_evt(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg_ctl, u32 *data_ctl,
+ i915_reg_t reg_htp, u32 *data_htp)
+{
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+ return false;
+
+ if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+ return false;
+
+ /* make sure reg_ctl and reg_htp are for the same event */
+ if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+ i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+ return false;
+
+ /*
+ * On ADL-S the HRR event handler is not restored after DC6.
+ * Clear it to zero from the beginning to avoid mismatches later.
+ */
+ if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl = 0;
+ *data_htp = 0;
+ return true;
+ }
+
+ /*
+ * TGL/ADL-S DMC firmware incorrectly uses the undelayed vblank
+ * event for the HRR handler, when it should be using the delayed
+ * vblank event instead. Fixed firmware was never released
+ * so the Windows driver just hacks around it by overriding
+ * the event ID. Do the same.
+ */
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl &= ~DMC_EVT_CTL_EVENT_ID_MASK;
+ *data_ctl |= REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ MAINDMC_EVENT_VBLANK_DELAYED_A);
+ return true;
+ }
+
+ return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -564,7 +619,7 @@ static bool disable_dmc_evt(struct intel_display *display,
/* also disable the HRR event on the main DMC on TGL/ADLS */
if ((display->platform.tigerlake || display->platform.alderlake_s) &&
- is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_DELAYED_A, reg, data))
return true;
return false;
@@ -577,7 +632,7 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return dmc_evt_ctl_disable();
+ return dmc_evt_ctl_disable(dmc->dmc_info[dmc_id].mmiodata[i]);
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
@@ -636,12 +691,6 @@ static void assert_dmc_loaded(struct intel_display *display,
found = intel_de_read(display, reg);
expected = dmc_mmiodata(display, dmc, dmc_id, i);
- /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
- if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
- found &= ~DMC_EVT_CTL_ENABLE;
- expected &= ~DMC_EVT_CTL_ENABLE;
- }
-
drm_WARN(display->drm, found != expected,
"DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
@@ -794,7 +843,7 @@ static void dmc_configure_event(struct intel_display *display,
if (!is_event_handler(display, dmc_id, event_id, reg, data))
continue;
- intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable(data));
num_handlers++;
}
@@ -1064,9 +1113,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+ }
+
+ for (i = 0; i < mmio_count - 1; i++) {
+ u32 orig_mmiodata[2] = {
+ dmc_info->mmiodata[i],
+ dmc_info->mmiodata[i+1],
+ };
+ if (!fixup_dmc_evt(display, dmc_id,
+ dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+ dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+ continue;
+
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+ orig_mmiodata[0], dmc_info->mmiodata[i]);
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+ i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+ orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+ }
+
+ for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
- i, mmioaddr[i], mmiodata[i],
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
@@ -1141,7 +1213,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
}
num_entries = package_header->num_entries;
- if (WARN_ON(package_header->num_entries > max_entries))
+ if (WARN_ON(num_entries > max_entries))
num_entries = max_entries;
fw_info = (const struct intel_fw_info *)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2eab591a8ef5..0ec82fcbcf48 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -51,7 +51,6 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -64,6 +63,8 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_jiffies.h"
+#include "intel_display_utils.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -93,14 +94,10 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-/* DP DSC throughput values used for slice count calculations KPixels/s */
-#define DP_DSC_PEAK_PIXEL_RATE 2720000
-#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
-#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-
/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
@@ -1018,13 +1015,43 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
+ int tp_rgb_yuv444;
+ int tp_yuv422_420;
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on
+ * this assumption.
+ */
+ if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420))
+ return 0;
+
+ if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width)
+ return 0;
+
+ /*
+ * TODO: Pass the total pixel rate of all the streams transferred to
+ * an MST tiled display, calculate the total slice count for all tiles
+ * from this and the per-tile slice count from the total slice count.
+ */
+ tp_rgb_yuv444 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, true);
+ tp_yuv422_420 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, false);
+
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * For now use the smaller of these, which is ok, potentially
+ * resulting in a higher than required minimum slice count.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on
+ * this assumption.
+ */
+ min_slice_count = DIV_ROUND_UP(mode_clock, min(tp_rgb_yuv444, tp_yuv422_420));
/*
* Due to some DSC engine BW limitations, we need to enable second
@@ -2340,24 +2367,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
-static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+/*
+ * Return whether FEC must be enabled for 8b10b SST or MST links. On 128b132b
+ * links FEC is always enabled implicitly by the HW, so this function returns
+ * false for that case.
+ */
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc)
{
- if (crtc_state->fec_enable)
- return;
+ if (intel_dp_is_uhbr(crtc_state))
+ return false;
/*
* Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
* Since, FEC is a bandwidth overhead, continue to not enable it for
* eDP. Until, there is a good reason to do so.
*/
- if (intel_dp_is_edp(intel_dp))
- return;
-
- if (intel_dp_is_uhbr(crtc_state))
- return;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return false;
- crtc_state->fec_enable = true;
+ return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
@@ -2375,7 +2404,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
int ret;
- intel_dp_fec_compute_config(intel_dp, pipe_config);
+ /*
+ * FIXME: set the FEC enabled state once pipe_config->port_clock is
+ * already known, so the UHBR/non-UHBR mode can be determined.
+ */
+ pipe_config->fec_enable = intel_dp_needs_8b10b_fec(pipe_config, true);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2450,7 +2483,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return ret;
}
- pipe_config->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(pipe_config);
+
drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
@@ -2460,6 +2494,40 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+static int
+dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_throughput_quirk)
+ return INT_MAX;
+
+ /*
+ * Synaptics Panamera branch devices have a problem decompressing a
+ * stream with a compressed link bpp higher than 12 if the pixel
+ * clock is higher than ~50% of the maximum overall throughput
+ * reported by the branch device. Work around this by limiting the
+ * maximum link bpp for such pixel clocks.
+ *
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output, after determining the pixel clock limit for
+ * YUV modes. For now use the smaller of the throughput values, which
+ * may limit the link bpp already at a lower mode clock than required
+ * for native YUV422/420 output formats.
+ * The RGB/YUV444 throughput value should always be equal to or
+ * smaller than the YUV422/420 value, but let's not depend on this
+ * assumption.
+ */
+ if (adjusted_mode->crtc_clock <
+ min(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420) / 2)
+ return INT_MAX;
+
+ return fxp_q4_from_int(12);
+}
+
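The quirk above only kicks in once the pixel clock reaches roughly half of the smaller overall-throughput limit reported by the branch device, at which point the link bpp is capped at 12 (expressed in .4 binary fixed point). A sketch with made-up throughput numbers:

#include <limits.h>
#include <stdio.h>

#define FXP_Q4_FROM_INT(x) ((x) << 4)	/* .4 binary fixed point */

static int quirk_max_bpp_x16(int crtc_clock_khz,
			     int tp_rgb_yuv444_khz, int tp_yuv422_420_khz)
{
	int tp = tp_rgb_yuv444_khz < tp_yuv422_420_khz ?
		 tp_rgb_yuv444_khz : tp_yuv422_420_khz;

	if (crtc_clock_khz < tp / 2)
		return INT_MAX;		/* no extra limit */

	return FXP_Q4_FROM_INT(12);	/* 12 bpp in .4 fixed point = 192 */
}

int main(void)
{
	printf("%d\n", quirk_max_bpp_x16(300000, 1200000, 1300000)); /* INT_MAX */
	printf("%d\n", quirk_max_bpp_x16(650000, 1200000, 1300000)); /* 192 */
	return 0;
}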
/*
* Calculate the output link min, max bpp values in limits based on the pipe bpp
* range, crtc_state and dsc mode. Return true on success.
@@ -2491,6 +2559,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
} else {
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int throughput_max_bpp_x16;
dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
@@ -2505,6 +2574,19 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
+
+ throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector, crtc_state);
+ throughput_max_bpp_x16 = clamp(throughput_max_bpp_x16,
+ limits->link.min_bpp_x16, max_link_bpp_x16);
+ if (throughput_max_bpp_x16 < max_link_bpp_x16) {
+ max_link_bpp_x16 = throughput_max_bpp_x16;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Decreasing link max bpp to " FXP_Q4_FMT " due to DSC throughput quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ FXP_Q4_ARGS(max_link_bpp_x16));
+ }
}
limits->link.max_bpp_x16 = max_link_bpp_x16;
@@ -4169,7 +4251,36 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
dsc_dpcd);
}
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
+static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
+{
+ u8 branch_caps[DP_DSC_BRANCH_CAP_SIZE];
+ int line_width;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX;
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX;
+ connector->dp.dsc_branch_caps.max_line_width = INT_MAX;
+
+ if (!is_branch)
+ return;
+
+ if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps,
+ sizeof(branch_caps)) != 0)
+ return;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, true) ? : INT_MAX;
+
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, false) ? : INT_MAX;
+
+ line_width = drm_dp_dsc_branch_max_line_width(branch_caps);
+ connector->dp.dsc_branch_caps.max_line_width = line_width > 0 ? line_width : INT_MAX;
+}
+
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -4182,6 +4293,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
/* Clear fec_capable to avoid using stale values */
connector->dp.fec_capability = 0;
+ memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps));
+ connector->dp.dsc_throughput_quirk = false;
+
if (dpcd_rev < DP_DPCD_REV_14)
return;
@@ -4196,6 +4310,19 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
+
+ if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return;
+
+ init_dsc_overall_throughput_limits(connector, is_branch);
+
+ /*
+ * TODO: Move the HW rev check to the DRM core quirk table as well,
+ * if that proves necessary after clarifying the list of affected
+ * devices.
+ */
+ if (drm_dp_has_quirk(desc, DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) &&
+ desc->ident.hw_rev == 0x10)
+ connector->dp.dsc_throughput_quirk = true;
}
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
@@ -4204,6 +4331,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
return;
intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+
+ if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
+ init_dsc_overall_throughput_limits(connector, false);
}
static void
@@ -4220,6 +4350,7 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
connector);
else
intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
+ &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd),
connector);
}
@@ -5553,7 +5684,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
if (intel_alpm_get_error(intel_dp)) {
intel_alpm_disable(intel_dp);
- intel_dp->alpm_parameters.sink_alpm_error = true;
+ intel_dp->alpm.sink_alpm_error = true;
}
if (intel_dp_test_short_pulse(intel_dp))
@@ -5921,6 +6052,8 @@ intel_dp_detect(struct drm_connector *_connector,
memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
+ intel_dp->psr.sink_panel_replay_dsc_support =
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
intel_dp_mst_disconnect(intel_dp);
@@ -6857,3 +6990,81 @@ void intel_dp_mst_resume(struct intel_display *display)
}
}
}
+
+static
+int intel_dp_sdp_compute_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int guardband = intel_crtc_vblank_length(crtc_state);
+ int min_sdp_guardband = intel_dp_sdp_min_guardband(crtc_state, false);
+
+ if (guardband < min_sdp_guardband) {
+ drm_dbg_kms(display->drm, "guardband %d < min sdp guardband %d\n",
+ guardband, min_sdp_guardband);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ intel_psr_compute_config_late(intel_dp, crtc_state);
+
+ ret = intel_dp_sdp_compute_config_late(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static
+int intel_dp_get_lines_for_sdp(const struct intel_crtc_state *crtc_state, u32 type)
+{
+ switch (type) {
+ case DP_SDP_VSC_EXT_VESA:
+ case DP_SDP_VSC_EXT_CEA:
+ return 10;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return 8;
+ case DP_SDP_PPS:
+ return 7;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return crtc_state->vrr.vsync_start + 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int sdp_guardband = 0;
+
+ if (assume_all_enabled ||
+ crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA));
+
+ if (assume_all_enabled ||
+ crtc_state->dsc.compression_enable)
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_PPS));
+
+ if ((assume_all_enabled && HAS_AS_SDP(display)) ||
+ crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_ADAPTIVE_SYNC));
+
+ return sdp_guardband;
+}
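The guardband helper above takes the maximum, over the SDP types in use, of the number of scanlines each packet needs. A minimal standalone sketch of that computation; the booleans stand in for the infoframes.enable / dsc.compression_enable checks in the driver and the line counts mirror the table above:

#include <stdbool.h>
#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

static int sdp_min_guardband(bool gamut_metadata, bool dsc_pps,
			     bool adaptive_sync, int vrr_vsync_start)
{
	int guardband = 0;

	if (gamut_metadata)
		guardband = max_int(guardband, 8);
	if (dsc_pps)
		guardband = max_int(guardband, 7);
	if (adaptive_sync)
		guardband = max_int(guardband, vrr_vsync_start + 1);

	return guardband;
}

int main(void)
{
	/* DSC + adaptive sync with vsync_start = 12 -> max(7, 13) = 13 */
	printf("%d\n", sdp_min_guardband(false, true, true, 12));
	return 0;
}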
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index f90cfd1dbbd0..200a8b267f64 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,6 +12,7 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_desc;
struct drm_dp_vsc_sdp;
struct drm_encoder;
struct drm_modeset_acquire_ctx;
@@ -72,6 +73,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc);
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -199,7 +202,9 @@ bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
bool dsc,
struct link_config_limits *limits);
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
@@ -215,5 +220,10 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state);
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 829a7c0fbe4f..2e7dbaf511b9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,9 +5,9 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 27f3716bdc1f..aad5fe14962f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -27,9 +27,10 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_jiffies.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_encoder.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 352f7ef29c28..4c0b943fe86f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -33,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -43,6 +42,7 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
@@ -293,12 +293,22 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_stream_update_slots(crtc_state, mst_state);
}
- if (dsc) {
- if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
- return -EINVAL;
-
- crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
- }
+ /*
+ * NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC
+ * after it was set by intel_dp_dsc_compute_config() ->
+ * intel_dp_needs_8b10b_fec().
+ */
+ crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc);
+ /*
+ * If FEC gets enabled only because of another compressed stream, FEC
+ * may not be supported for this uncompressed stream on the whole link
+ * path up to the sink DPRX. In that case a downstream branch device
+ * will disable FEC for the uncompressed stream as expected, so FEC
+ * support doesn't need to be checked for this stream here.
+ */
+ if (crtc_state->fec_enable && dsc &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
@@ -811,14 +821,14 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
return mask;
}
-static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
- u8 fec_pipe_mask = 0;
+ u8 dsc_pipe_mask = 0;
int ret;
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
@@ -831,16 +841,16 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
- if (crtc_state->fec_enable)
- fec_pipe_mask |= BIT(crtc->pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ dsc_pipe_mask |= BIT(crtc->pipe);
}
- if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask)
return 0;
- limits->force_fec_pipes |= mst_pipe_mask;
+ limits->link_dsc_pipes |= mst_pipe_mask;
- ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ ret = intel_modeset_pipes_in_mask_early(state, "MST DSC",
mst_pipe_mask);
return ret ? : -EAGAIN;
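The mask comparison above forces a recompute of every pipe on the MST link when only a subset of its streams ended up with DSC enabled: once any stream on an 8b/10b MST link uses DSC, the resulting FEC overhead affects the whole link, so all streams have to be recalculated with that knowledge. A minimal sketch of the decision, assuming the same pipe-bitmask representation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_recompute(uint8_t mst_pipe_mask, uint8_t dsc_pipe_mask)
{
	/* No DSC at all, or DSC on every MST pipe already: nothing to do. */
	return dsc_pipe_mask && mst_pipe_mask != dsc_pipe_mask;
}

int main(void)
{
	/* pipes A and B drive the MST link, only A uses DSC -> recompute */
	printf("%d\n", needs_recompute(0x3, 0x1)); /* 1 */
	printf("%d\n", needs_recompute(0x3, 0x3)); /* 0 */
	return 0;
}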
@@ -894,7 +904,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
int i;
for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
- ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ ret = intel_dp_mst_check_dsc_change(state, mgr, limits);
if (ret)
return ret;
@@ -1658,6 +1668,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
+ struct drm_dp_desc desc;
if (!connector->dp.dsc_decompression_aux)
return;
@@ -1665,7 +1676,13 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
return;
- intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc,
+ drm_dp_is_branch(dpcd_caps)) < 0)
+ return;
+
+ intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV],
+ &desc, drm_dp_is_branch(dpcd_caps),
+ connector);
}
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 3f77ad92c156..5df6347a420d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -24,13 +24,13 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "vlv_dpio_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index f969c5399a51..2e1f67be8eda 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -17,6 +17,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -1232,6 +1233,28 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ /* TODO: Do the readback via intel_compute_shared_dplls() */
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -1691,6 +1714,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
+ .crtc_compute_clock = xe3plpd_crtc_compute_clock,
+};
+
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
@@ -1789,7 +1816,9 @@ int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
void
intel_dpll_init_clock_hook(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ display->funcs.dpll = &xe3plpd_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 14)
display->funcs.dpll = &mtl_dpll_funcs;
else if (display->platform.dg2)
display->funcs.dpll = &dg2_dpll_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 8ea96cc524a1..92c433f7b7e2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,11 +27,11 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index f131bdd1c975..6183da90b28d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -267,6 +267,16 @@ struct intel_cx0pll_state {
bool tbt_mode;
};
+struct intel_lt_phy_pll_state {
+ u32 clock; /* in kHz */
+ u8 addr_msb[13];
+ u8 addr_lsb[13];
+ u8 data[13][4];
+ u8 config[3];
+ bool ssc_enabled;
+ bool tbt_mode;
+};
+
struct intel_dpll_hw_state {
union {
struct i9xx_dpll_hw_state i9xx;
@@ -276,6 +286,7 @@ struct intel_dpll_hw_state {
struct icl_dpll_hw_state icl;
struct intel_mpllb_state mpllb;
struct intel_cx0pll_state cx0pll;
+ struct intel_lt_phy_pll_state ltpll;
};
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index dee44d45b668..4ad4efbf9253 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -115,24 +115,6 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}
-static int dsb_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- intel_pre_commit_crtc_state(state, crtc);
-
- if (pre_commit_is_vrr_active(state, crtc))
- /*
- * When the push is sent during vblank it will trigger
- * on the next scanline, hence we have up to one extra
- * scanline until the delayed vblank occurs after
- * TRANS_PUSH has been written.
- */
- return intel_vrr_vblank_delay(crtc_state) + 1;
- else
- return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
-}
-
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -723,7 +705,7 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0);
if (pre_commit_is_vrr_active(state, crtc)) {
- int vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ int vblank_delay = crtc_state->set_context_latency;
end = intel_vrr_vmin_vblank_start(crtc_state);
start = end - vblank_delay - latency;
@@ -815,16 +797,43 @@ void intel_dsb_chain(struct intel_atomic_state *state,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb)
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
- dsb_vblank_delay(state, crtc));
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int wait_scanlines;
+
+ if (pre_commit_is_vrr_active(state, crtc)) {
+ /*
+ * If the push happened before the vmin decision boundary
+ * we don't know how far we are from the undelayed vblank.
+ * Wait until we're past the vmin safe window, at which
+ * point we're SCL lines away from the delayed vblank.
+ *
+ * If the push happened after the vmin decision boundary
+ * the hardware itself guarantees that we're SCL lines
+ * away from the delayed vblank, and we won't be inside
+ * the vmin safe window so this extra wait does nothing.
+ */
+ intel_dsb_wait_scanline_out(state, dsb,
+ intel_vrr_safe_window_start(crtc_state),
+ intel_vrr_vmin_safe_window_end(crtc_state));
+ /*
+ * When the push is sent during vblank it will trigger
+ * on the next scanline, hence we have up to one extra
+ * scanline until the delayed vblank occurs after
+ * TRANS_PUSH has been written.
+ */
+ wait_scanlines = crtc_state->set_context_latency + 1;
+ } else {
+ wait_scanlines = intel_mode_vblank_delay(adjusted_mode);
+ }
- intel_dsb_wait_usec(dsb, usecs);
+ intel_dsb_wait_usec(dsb, intel_scanlines_to_usecs(adjusted_mode, wait_scanlines));
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index c8f4499916eb..2f31f2c1d0c5 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -48,8 +48,8 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb);
void intel_dsb_interrupt(struct intel_dsb *dsb);
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count);
void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count);
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb);
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb);
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
struct intel_dsb *dsb,
int lower, int upper);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 23402408e172..31edf57a296f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -38,10 +38,10 @@
#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_gmbus_regs.h"
@@ -106,8 +106,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u8 type, flags, seq_port;
u16 len;
enum port port;
-
- drm_dbg_kms(display->drm, "\n");
+ ssize_t ret;
+ bool hs_mode;
flags = *data++;
type = *data++;
@@ -129,45 +129,53 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
goto out;
}
- if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ hs_mode = (flags >> MIPI_TRANSFER_MODE_SHIFT) & 1;
+ if (hs_mode)
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
else
dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+ drm_dbg_kms(display->drm, "DSI packet: Port %c (seq %u), Flags 0x%02x, VC %u, %s, Type 0x%02x, Length %u, Data %*ph\n",
+ port_name(port), seq_port, flags, dsi_device->channel,
+ hs_mode ? "HS" : "LP", type, len, (int)len, data);
+
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- mipi_dsi_generic_write(dsi_device, NULL, 0);
+ ret = mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 1);
+ ret = mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 2);
+ ret = mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- mipi_dsi_generic_write(dsi_device, data, len);
+ ret = mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_DCS_LONG_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
+ if (ret < 0)
+ drm_err(display->drm, "DSI send packet failed with %pe\n", ERR_PTR(ret));
+
if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 08b48e36aca6..c2663d6e2c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,12 +34,12 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_dvo_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 1628b5d98bba..064c0d3e8177 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,12 +11,11 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
@@ -548,8 +547,6 @@ static bool plane_has_modifier(struct intel_display *display,
u8 plane_caps,
const struct intel_modifier_desc *md)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!IS_DISPLAY_VER(display, md->display_ver.from, md->display_ver.until))
return false;
@@ -561,15 +558,15 @@ static bool plane_has_modifier(struct intel_display *display,
* where supported.
*/
if (intel_fb_is_ccs_modifier(md->modifier) &&
- HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes)
+ HAS_AUX_CCS(display) != !!md->ccs.packed_aux_planes)
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
+ (DISPLAY_VER(display) < 14 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
+ (DISPLAY_VER(display) < 20 || display->platform.dgfx))
return false;
return true;
@@ -778,7 +775,6 @@ unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct intel_display *display = to_intel_display(fb->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
@@ -815,7 +811,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(display) == 2 || HAS_128_BYTE_Y_TILING(i915))
+ if (HAS_128B_Y_TILING(display))
return 128;
else
return 512;
@@ -2118,6 +2114,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2216,18 +2213,24 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
- ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
+ ret = intel_fb_bo_framebuffer_init(obj, mode_cmd);
if (ret)
goto err_frontbuffer_put;
@@ -2324,6 +2327,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
@@ -2350,20 +2356,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
-
- intel_fb->panic = panic;
-
return intel_fb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index f6758814bfae..bfecd73d5fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -19,8 +19,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
/* Nothing to do for i915 */
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *_obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
index eefcb05a99f0..d775773c6c03 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -14,8 +14,7 @@ struct drm_mode_fb_cmd2;
void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_gem_object *
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index ab8fb360b0f5..a1e3083022ee 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -50,17 +50,16 @@
#include "gt/intel_gt_types.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
@@ -103,7 +102,8 @@ struct intel_fbc {
struct mutex lock;
unsigned int busy_bits;
- struct i915_stolen_fb compressed_fb, compressed_llb;
+ struct intel_stolen_node *compressed_fb;
+ struct intel_stolen_node *compressed_llb;
enum intel_fbc_id id;
@@ -142,15 +142,18 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
return stride;
}
-static unsigned int intel_fbc_cfb_cpp(void)
+static unsigned int intel_fbc_cfb_cpp(const struct intel_plane_state *plane_state)
{
- return 4; /* FBC always 4 bytes per pixel */
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ return max(cpp, 4);
}
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return intel_fbc_plane_stride(plane_state) * cpp;
}
@@ -204,7 +207,7 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return _intel_fbc_cfb_stride(display, cpp, width, stride);
}
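With the change above the CFB bytes-per-pixel now follow the framebuffer format (but never drop below 4), and the CFB stride is simply the plane stride in pixels times that cpp. A small standalone sketch of the arithmetic; the 1:1 compression-limit assumption and the example widths are illustrative only:

#include <stdio.h>

/* cfb bytes per pixel: the framebuffer cpp, but never less than 4 */
static unsigned int cfb_cpp(unsigned int fb_cpp)
{
	return fb_cpp > 4 ? fb_cpp : 4;
}

/* plane-stride based cfb stride in bytes, assuming a 1:1 compression limit */
static unsigned int plane_cfb_stride(unsigned int plane_stride_pixels,
				     unsigned int fb_cpp)
{
	return plane_stride_pixels * cfb_cpp(fb_cpp);
}

int main(void)
{
	printf("%u\n", plane_cfb_stride(3840, 4)); /* XRGB8888: 15360 */
	printf("%u\n", plane_cfb_stride(3840, 8)); /* FP16:     30720 */
	return 0;
}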
@@ -377,20 +380,19 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
drm_WARN_ON(display->drm,
- range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_fb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
- range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_llb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
+ i915_gem_stolen_node_address(fbc->compressed_fb));
intel_de_write(display, FBC_LL_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
+ i915_gem_stolen_node_address(fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -498,7 +500,7 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, DPFC_CB_BASE,
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -567,7 +569,7 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -798,7 +800,6 @@ static u64 intel_fbc_cfb_base_max(struct intel_display *display)
static u64 intel_fbc_stolen_end(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -807,7 +808,7 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* underruns, even if that range is not reserved by the BIOS. */
if (display->platform.broadwell ||
(DISPLAY_VER(display) == 9 && !display->platform.broxton))
- end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
+ end = i915_gem_stolen_area_size(display->drm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -836,20 +837,19 @@ static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
@@ -862,17 +862,15 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_fb));
+ i915_gem_stolen_node_allocated(fbc->compressed_fb));
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_llb));
+ i915_gem_stolen_node_allocated(fbc->compressed_llb));
if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
- ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
- 4096, 4096);
+ ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096);
if (ret)
goto err;
}
@@ -888,14 +886,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
- i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
+ i915_gem_stolen_node_size(fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
err:
- if (i915_gem_stolen_initialized(i915))
+ if (i915_gem_stolen_initialized(display->drm))
drm_info_once(display->drm,
"not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
@@ -933,9 +931,12 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (IS_DISPLAY_VER(display, 11, 12))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
+ /*
+ * Wa_22014263786
+ * Fixes: Screen flicker with FBC and Package C state enabled
+ * Workaround: Forced SLB invalidation before start of new frame.
+ */
+ if (intel_display_wa(display, 22014263786))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -946,16 +947,13 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
- if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_fb))
+ i915_gem_stolen_remove_node(fbc->compressed_fb);
}
void intel_fbc_cleanup(struct intel_display *display)
@@ -968,6 +966,9 @@ void intel_fbc_cleanup(struct intel_display *display)
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+
kfree(fbc);
}
}
@@ -1084,11 +1085,57 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
}
}
+static bool
+xe3p_lpd_fbc_fp16_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xe3p_lpd_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (lnl_fbc_pixel_format_is_valid(plane_state))
+ return true;
+
+ if (xe3p_lpd_fbc_fp16_format_is_valid(plane_state))
+ return true;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ARGB16161616:
+ case DRM_FORMAT_ABGR16161616:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ return DISPLAY_VER(display) >= 35 &&
+ xe3p_lpd_fbc_fp16_format_is_valid(plane_state);
+}
+
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- if (DISPLAY_VER(display) >= 20)
+ if (DISPLAY_VER(display) >= 35)
+ return xe3p_lpd_fbc_pixel_format_is_valid(plane_state);
+ else if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_pixel_format_is_valid(plane_state);
@@ -1356,7 +1403,7 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
intel_fbc_cfb_size(plane_state) <= fbc->limit *
- i915_gem_stolen_node_size(&fbc->compressed_fb);
+ i915_gem_stolen_node_size(fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1422,6 +1469,18 @@ intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
}
}
+static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if (display->platform.haswell || display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no FBC specific limits to worry about */
+ return 0;
+}
+
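The WaFbcExceedCdClockThreshold limit above requires the pixel rate to stay below 95% of CDCLK, so the minimum acceptable CDCLK is the pixel rate scaled by 100/95 and rounded up. A worked sketch of that formula; the pixel rate is just an example value in kHz:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int fbc_min_cdclk_khz(unsigned int pixel_rate_khz)
{
	/* pixel rate must be < 95% of CDCLK, hence CDCLK >= rate * 100/95 */
	return DIV_ROUND_UP(pixel_rate_khz * 100, 95);
}

int main(void)
{
	printf("%u\n", fbc_min_cdclk_khz(533250)); /* -> 561316 kHz */
	return 0;
}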
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1437,7 +1496,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
- if (!i915_gem_stolen_initialized(i915)) {
+ if (!i915_gem_stolen_initialized(display->drm)) {
plane_state->no_fbc_reason = "stolen memory not initialised";
return 0;
}
@@ -1463,7 +1522,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) {
+ if (intel_display_vtd_active(display) &&
+ (display->platform.skylake || display->platform.broxton)) {
plane_state->no_fbc_reason = "VT-d enabled";
return 0;
}
@@ -1561,18 +1621,9 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- /* WaFbcExceedCdClockThreshold:hsw,bdw */
- if (display->platform.haswell || display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
- plane_state->no_fbc_reason = "pixel rate too high";
- return 0;
- }
+ if (_intel_fbc_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) {
+ plane_state->no_fbc_reason = "pixel rate too high";
+ return 0;
}
plane_state->no_fbc_reason = NULL;
@@ -1580,6 +1631,27 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ int min_cdclk;
+
+ if (!plane->fbc)
+ return 0;
+
+ min_cdclk = _intel_fbc_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency;
+ * if that is not enough, FBC will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ return min_cdclk;
+}
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
struct intel_crtc *crtc,
@@ -2084,6 +2156,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
if (!fbc)
return NULL;
+ fbc->compressed_fb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_fb)
+ goto err;
+ fbc->compressed_llb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_llb)
+ goto err;
+
fbc->id = fbc_id;
fbc->display = display;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -2103,6 +2182,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
fbc->funcs = &i8xx_fbc_funcs;
return fbc;
+
+err:
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ kfree(fbc);
+
+ return NULL;
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 0e715cb6b4e6..91424563206a 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -28,6 +28,7 @@ enum intel_fbc_id {
};
int intel_fbc_atomic_check(struct intel_atomic_state *state);
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state);
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
@@ -52,5 +53,7 @@ void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane);
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index bf5721856f3c..e5449c41cfa1 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -205,6 +205,62 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_set_suspend = intelfb_set_suspend,
};
+static void intel_fbdev_fill_mode_cmd(struct drm_fb_helper_surface_size *sizes,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd->flags = DRM_MODE_FB_MODIFIERS;
+ mode_cmd->width = sizes->surface_width;
+ mode_cmd->height = sizes->surface_height;
+
+ mode_cmd->pitches[0] = intel_fbdev_fb_pitch_align(mode_cmd->width * DIV_ROUND_UP(sizes->surface_bpp, 8));
+ mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd->modifier[0] = DRM_FORMAT_MOD_LINEAR;
+}
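The mode_cmd fill above derives the stride by aligning the bpp-based byte width to 64 bytes, and the BO size allocated later is that stride times the height rounded up to a whole page. A small worked sketch with 1920x1080 @ 32bpp as an example mode:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x)	ALIGN(x, 4096)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);
	unsigned int size = PAGE_ALIGN(pitch * height);

	printf("pitch=%u size=%u\n", pitch, size); /* pitch=7680 size=8294400 */
	return 0;
}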
+
+static struct intel_framebuffer *
+__intel_fbdev_fb_alloc(struct intel_display *display,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ int size;
+
+ intel_fbdev_fill_mode_cmd(sizes, &mode_cmd);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = intel_fbdev_fb_bo_create(display->drm, size);
+ if (IS_ERR(obj)) {
+ fb = ERR_CAST(obj);
+ goto err;
+ }
+
+ fb = intel_framebuffer_create(obj,
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
+ if (IS_ERR(fb)) {
+ intel_fbdev_fb_bo_destroy(obj);
+ goto err;
+ }
+
+ drm_gem_object_put(obj);
+
+ return to_intel_framebuffer(fb);
+
+err:
+ return ERR_CAST(fb);
+
+}
+
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -235,7 +291,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
- fb = intel_fbdev_fb_alloc(helper, sizes);
+
+ fb = __intel_fbdev_fb_alloc(display, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
@@ -275,7 +332,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display->drm, info, obj, vma);
if (ret)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 9a3b12849c56..c3202ba141c5 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -3,42 +3,24 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct intel_display *display = to_intel_display(helper->dev);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+ return ALIGN(stride, 64);
+}
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct drm_i915_private *dev_priv = to_i915(drm);
+ struct drm_i915_gem_object *obj;
obj = ERR_PTR(-ENODEV);
if (HAS_LMEM(dev_priv)) {
@@ -53,31 +35,29 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
+ if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
- drm_get_format_info(display->drm,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- i915_gem_object_put(obj);
+ return &obj->base;
+}
- return to_intel_framebuffer(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -109,7 +89,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(display->drm,
+ drm_err(drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index cb7957272715..fd0b3775dc1f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -6,16 +6,18 @@
#ifndef __INTEL_FBDEV_FB_H__
#define __INTEL_FBDEV_FB_H__
-struct drm_fb_helper;
-struct drm_fb_helper_surface_size;
+#include <linux/types.h>
+
+struct drm_device;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct fb_info;
struct i915_vma;
-struct intel_display;
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+u32 intel_fbdev_fb_pitch_align(u32 stride);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj);
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 59a36b3a22c1..5bb0090dd5ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -9,13 +9,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index 6ab2272ab2df..f162614a925d 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -7,16 +7,16 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
-#include "intel_step.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
-#include "intel_flipq.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dsb.h"
+#include "intel_flipq.h"
+#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 531ee122bf82..7195e8cf671c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -19,9 +19,9 @@
#include <drm/intel/i915_component.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index c3907aeb0a78..3e7b480ee9f1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -10,7 +10,6 @@
#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
struct intel_hdcp_gsc_context {
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 4ab7e2e3bfd4..5c637341b210 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -45,7 +45,6 @@
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -55,6 +54,7 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
@@ -68,6 +68,20 @@
#include "intel_snps_phy.h"
#include "intel_vrr.h"
+bool intel_hdmi_is_frl(u32 clock)
+{
+ switch (clock) {
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
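The new intel_hdmi_is_frl() helper simply classifies a port clock as one of the fixed HDMI FRL link rates. A minimal usage sketch (illustrative only, not part of the patch; the results follow directly from the switch above):

	bool is_frl;

	is_frl = intel_hdmi_is_frl(600000);	/* true: 6 Gbps FRL link rate */
	is_frl = intel_hdmi_is_frl(594000);	/* false: 594 MHz TMDS clock, not an FRL rate */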
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index dec2ad7dd8a2..be2fad57e4ad 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -60,6 +60,7 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
+bool intel_hdmi_is_frl(u32 clock);
void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index b2a1df41c12c..235706229ffb 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -29,12 +29,12 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_utils.h"
#include "intel_connector.h"
-#include "intel_display_power.h"
#include "intel_display_core.h"
+#include "intel_display_power.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 4f72f3fb9af5..46c47b3d6f42 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -6,11 +6,11 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -420,6 +420,9 @@ u32 i9xx_hpd_irq_ack(struct intel_display *display)
u32 hotplug_status = 0, hotplug_status_mask;
int i;
+ if (!HAS_HOTPLUG(display))
+ return 0;
+
if (display->platform.g4x ||
display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index f52dee0ea412..d2862de894fa 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -20,6 +20,7 @@
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+#include "intel_vdsc.h"
static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
const struct intel_crtc *crtc)
@@ -55,7 +56,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
enum pipe pipe;
- limits->force_fec_pipes = 0;
+ limits->link_dsc_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
@@ -65,8 +66,8 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
- if (crtc_state->fec_enable)
- limits->force_fec_pipes |= BIT(pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ limits->link_dsc_pipes |= BIT(pipe);
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
@@ -265,10 +266,10 @@ assert_link_limit_change_valid(struct intel_display *display,
bool bpps_changed = false;
enum pipe pipe;
- /* FEC can't be forced off after it was forced on. */
+ /* DSC can't be disabled after it was enabled. */
if (drm_WARN_ON(display->drm,
- (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
- old_limits->force_fec_pipes))
+ (old_limits->link_dsc_pipes & new_limits->link_dsc_pipes) !=
+ old_limits->link_dsc_pipes))
return false;
for_each_pipe(display, pipe) {
@@ -286,8 +287,8 @@ assert_link_limit_change_valid(struct intel_display *display,
/* At least one limit must change. */
if (drm_WARN_ON(display->drm,
!bpps_changed &&
- new_limits->force_fec_pipes ==
- old_limits->force_fec_pipes))
+ new_limits->link_dsc_pipes ==
+ old_limits->link_dsc_pipes))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index 95ab7c50c61d..cb18e171037c 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -15,7 +15,7 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
- u8 force_fec_pipes;
+ u8 link_dsc_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d56026c4efdd..9ceabbc981a1 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -31,10 +31,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
new file mode 100644
index 000000000000..af48d6cde226
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -0,0 +1,2000 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_cx0_phy.h"
+#include "intel_cx0_phy_regs.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dpll_mgr.h"
+#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
+#include "intel_lt_phy_regs.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
+#include "intel_tc.h"
+
+#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
+ for ((__lane) = 0; (__lane) < 2; (__lane)++) \
+ for_each_if((__lane_mask) & BIT(__lane))
+
+#define INTEL_LT_PHY_LANE0 BIT(0)
+#define INTEL_LT_PHY_LANE1 BIT(1)
+#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\
+ INTEL_LT_PHY_LANE0)
+#define MODE_DP 3
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
+ .clock = 162000,
+ .config = {
+ 0x83,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x5, 0xa, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x4, 0x4, 0x82, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
+ .clock = 270000,
+ .config = {
+ 0x8b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x81, 0xad },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
+ .clock = 540000,
+ .config = {
+ 0x93,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0xa, 0x4, 0x81, 0xda },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
+ .clock = 810000,
+ .config = {
+ 0x9b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
+ .clock = 1000000,
+ .config = {
+ 0x43,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
+ .clock = 1350000,
+ .config = {
+ 0xcb,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x9, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x80, 0xe0 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
+ .clock = 2000000,
+ .config = {
+ 0x53,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_dp_hbr3,
+ &xe3plpd_lt_dp_uhbr10,
+ &xe3plpd_lt_dp_uhbr13_5,
+ &xe3plpd_lt_dp_uhbr20,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
+ .clock = 216000,
+ .config = {
+ 0xa3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
+ .clock = 243000,
+ .config = {
+ 0xab,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2f, 0x60 },
+ { 0xb0, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x13, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x47, 0x48, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
+ .clock = 324000,
+ .config = {
+ 0xb3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x8a, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
+ .clock = 432000,
+ .config = {
+ 0xbb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0xc, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
+ .clock = 675000,
+ .config = {
+ 0xdb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_edp_2_16,
+ &xe3plpd_lt_edp_2_43,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_edp_3_24,
+ &xe3plpd_lt_edp_4_32,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_edp_6_75,
+ &xe3plpd_lt_dp_hbr3,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
+ .clock = 25200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0c, 0x15, 0x27, 0x60 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x98, 0x28 },
+ { 0x42, 0x0, 0x84, 0x10 },
+ { 0x80, 0x0f, 0xd9, 0xb5 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
+ .clock = 27200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0b, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x96, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
+ .clock = 74250,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x4, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x88, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
+ .clock = 148500,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x84, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
+ .clock = 594000,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0, 0x95, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
+ &xe3plpd_lt_hdmi_252,
+ &xe3plpd_lt_hdmi_272,
+ &xe3plpd_lt_hdmi_742p5,
+ &xe3plpd_lt_hdmi_1p485,
+ &xe3plpd_lt_hdmi_5p94,
+ NULL,
+};
+
+static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (!intel_tc_port_in_dp_alt_mode(dig_port))
+ return INTEL_LT_PHY_BOTH_LANES;
+
+ return intel_tc_port_max_lane_count(dig_port) > 2
+ ? INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
+}
+
+static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
+{
+ return intel_cx0_read(encoder, lane_mask, addr);
+}
+
+static void intel_lt_phy_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
+{
+ intel_cx0_write(encoder, lane_mask, addr, data, committed);
+}
+
+static void intel_lt_phy_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+{
+ intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
+}
+
+static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
+ int lane)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_rmw(display,
+ XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY, 0);
+}
+
+static void
+assert_dc_off(struct intel_display *display)
+{
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
+ drm_WARN_ON(display->drm, !enabled);
+}
+
+static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int ack;
+ u32 val;
+
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
+ phy_name(phy));
+ intel_cx0_bus_reset(encoder, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);
+
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING |
+ XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
+ XELPDP_PORT_M2P_DATA(data) |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ if (ack < 0)
+ return ack;
+
+ if (val & XELPDP_PORT_P2M_ERROR_SET) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
+ phy_name(phy), val);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+ intel_cx0_bus_reset(encoder, lane);
+ return -EINVAL;
+ }
+
+	/*
+	 * RE-VISIT:
+	 * This delay is needed to give the PHY time to set everything up; it was
+	 * a requirement to get the display up and running. This is the time the
+	 * PHY takes to settle down after it has been programmed.
+	 */
+ udelay(150);
+ intel_clear_response_ready_flag(encoder, lane);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+
+ return 0;
+}
+
+static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int i, status;
+
+ assert_dc_off(display);
+
+	/* 3 tries are assumed to be enough for the write to succeed */
+ for (i = 0; i < 3; i++) {
+ status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
+ expected_mac_val);
+
+ if (status == 0)
+ return;
+ }
+
+ drm_err_once(display->drm,
+ "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
+}
+
+static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ int lane;
+
+ for_each_lt_phy_lane_in_mask(lane_mask, lane)
+ __intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
+}
+
+static void
+intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
+{
+	/*
+	 * The new PORT_BUF_CTL6 handling for DC5 entry and exit needs to be done
+	 * by the DMC firmware and is not explicitly mentioned in Bspec. That
+	 * leaves this function as a plain wrapper for now, but keep it in
+	 * anticipation of future changes.
+	 */
+ intel_cx0_setup_powerdown(encoder);
+}
+
+static void
+intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
+{
+ intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
+}
+
+static void
+intel_lt_phy_lane_reset(struct intel_encoder *encoder,
+ u8 lane_count)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);
+
+ intel_lt_phy_setup_powerdown(encoder, lane_count);
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_RESET);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, 0);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_US,
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK assertion Ack not done after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_MS * 1000);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_pipe_reset | lane_phy_pulse_status, 0);
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status, 0,
+ XE3PLPD_RESET_END_LATENCY_US, 2, NULL))
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of Lane reset after %dus.\n",
+ phy_name(phy), XE3PLPD_RESET_END_LATENCY_US);
+
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status, lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
+ phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
+}
+
+static void
+intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool lane_reversal)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = 0;
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
+ XELPDP_PORT_REVERSAL,
+ lane_reversal ? XELPDP_PORT_REVERSAL : 0);
+
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
+	/*
+	 * With the LT PHY this actually means MACCLK rather than MAXPCLK, but
+	 * since the register bits remain the same we reuse the same definition.
+	 */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ intel_hdmi_is_frl(crtc_state->port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ else
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+
+	/* DP 2.0 10G and 20G rates enable MPLLA */
+ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
+ val |= XELPDP_SSC_ENABLE_PLLA;
+ else
+ val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static u32 intel_lt_phy_get_dp_clock(u8 rate)
+{
+ switch (rate) {
+ case 0:
+ return 162000;
+ case 1:
+ return 270000;
+ case 2:
+ return 540000;
+ case 3:
+ return 810000;
+ case 4:
+ return 216000;
+ case 5:
+ return 243000;
+ case 6:
+ return 324000;
+ case 7:
+ return 432000;
+ case 8:
+ return 1000000;
+ case 9:
+ return 1350000;
+ case 10:
+ return 2000000;
+ case 11:
+ return 675000;
+ default:
+ MISSING_CASE(rate);
+ return 0;
+ }
+}
+
+static bool
+intel_lt_phy_config_changed(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val, rate;
+ u32 clock;
+
+ val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_0_CONFIG);
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
+
+	/*
+	 * The only time we do not reconfigure the PLL is when the 1.62 Gbps
+	 * clock is in use, since the PHY PLL defaults to that rate; otherwise
+	 * we always need to reconfigure it.
+	 */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ clock = intel_lt_phy_get_dp_clock(rate);
+		if (crtc_state->port_clock == 162000 && crtc_state->port_clock == clock)
+ return false;
+ }
+
+ return true;
+}
+
+static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+
+ intel_psr_pause(intel_dp);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
+
+ return wakeref;
+}
+
+static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+static const struct intel_lt_phy_pll_state * const *
+intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return xe3plpd_lt_edp_tables;
+
+ return xe3plpd_lt_dp_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return xe3plpd_lt_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+static bool
+intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_panel_use_ssc(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+ }
+ }
+
+ return false;
+}
+
+static int
+intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
+{
+#define REF_CLK_KHZ 38400
+#define REGVAL(i) ( \
+ (lt_state->data[i][3]) | \
+ (lt_state->data[i][2] << 8) | \
+ (lt_state->data[i][1] << 16) | \
+ (lt_state->data[i][0] << 24) \
+)
+
+ int clk = 0;
+ u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
+ u64 temp0, temp1;
+ /*
+ * The algorithm uses '+' to combine bitfields when
+ * constructing PLL_reg3 and PLL_reg57:
+ * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new;
+ * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5);
+ *
+ * However, this is likely intended to be a bitwise OR operation,
+ * as each field occupies distinct, non-overlapping bits in the register.
+ *
+ * PLL_reg57 is composed of following fields packed into a 32-bit value:
+ * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
+ * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
+ * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
+ * (though 8 bits are given to it) -> placed at bits 7-14
+ * - D6_new: fits in lower 7 bits -> placed at bits 0-6
+ * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
+ *
+ * Similarly, PLL_reg3 is packed as:
+ * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29
+ * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21
+ * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16
+ * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
+ * -> placed at bits 5-14
+ * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
+ */
+ pll_reg_5 = REGVAL(2);
+ pll_reg_3 = REGVAL(1);
+ pll_reg_57 = REGVAL(3);
+ m2div_frac = pll_reg_5;
+
+	/*
+	 * From the forward algorithm we know
+	 * m2div = 2 * m2
+	 * val = y * frequency * 5
+	 * So:
+	 * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
+	 *           = (m2div * refclk_khz / (d8 * 10))
+	 */
+ d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
+ m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
+ temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
+ temp1 = (u64)m2div_int * REF_CLK_KHZ;
+ if (d8 == 0)
+ return 0;
+
+ clk = div_u64((temp1 + temp0), d8 * 10);
+
+ return clk;
+}
+
+int
+intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ int clk;
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
+ u8 mode, rate;
+
+ mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
+ lt_state->config[0]);
+	/*
+	 * For eDP/DP read the clock value from the tables and return it,
+	 * as the algorithm used for calculating the port clock does not
+	 * exactly match the eDP/DP clock.
+	 */
+ if (mode == MODE_DP) {
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK,
+ lt_state->config[0]);
+ clk = intel_lt_phy_get_dp_clock(rate);
+ } else {
+ clk = intel_lt_phy_calc_hdmi_port_clock(lt_state);
+ }
+
+ return clk;
+}
+
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_lt_phy_pll_state * const *tables;
+ int i;
+
+ tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->clock) {
+ crtc_state->dpll_hw_state.ltpll = *tables[i];
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ crtc_state->dpll_hw_state.ltpll.config[2] = 1;
+ }
+ crtc_state->dpll_hw_state.ltpll.ssc_enabled =
+ intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
+ return 0;
+ }
+ }
+
+	/* TODO: Add a function to compute the data for HDMI TMDS */
+
+ return -EINVAL;
+}
+
+static void
+intel_lt_phy_program_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ int i, j, k;
+
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
+
+ for (i = 0; i <= 12; i++) {
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_msb[i],
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
+ MB_WRITE_COMMITTED);
+
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j),
+ crtc_state->dpll_hw_state.ltpll.data[i][k],
+ MB_WRITE_COMMITTED);
+ }
+}
+
+static void
+intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 lane_count = crtc_state->lane_count;
+ bool is_dp_alt =
+ intel_tc_port_in_dp_alt_mode(dig_port);
+ enum intel_tc_pin_assignment tc_pin =
+ intel_tc_port_get_pin_assignment(dig_port);
+ u8 transmitter_mask = 0;
+
+	/*
+	 * We have two transmitters per PHY lane and a total of 2 PHY lanes, i.e.
+	 * 4 transmitters in total. Prepare a mask of the transmitters that need
+	 * to be activated on each lane: TX 0,1 correspond to LANE0 and TX 2,3
+	 * correspond to LANE1.
+	 */
+
+ switch (lane_count) {
+ case 1:
+ transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0);
+ if (is_dp_alt) {
+ if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D)
+ transmitter_mask = REG_BIT8(0);
+ else
+ transmitter_mask = REG_BIT8(1);
+ }
+ break;
+ case 2:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(1, 0);
+ break;
+ case 3:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(2, 0);
+ break;
+ case 4:
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ }
+
+ if (transmitter_mask & BIT(0)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(1)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+
+ if (transmitter_mask & BIT(2)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(3)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+}
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref = 0;
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+ u8 rate_update;
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Enable MacCLK at default 162 MHz frequency. */
+ intel_lt_phy_lane_reset(encoder, crtc_state->lane_count);
+
+ /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
+ intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+
+ /* 3. Change owned PHY lanes power to Ready state. */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_READY);
+
+ /*
+	 * 4. Read the PHY message bus VDR register PHY_VDR_0_Config to check the
+	 * enabled PLL type, encoded rate and encoded mode.
+ */
+ if (intel_lt_phy_config_changed(encoder, crtc_state)) {
+ /*
+ * 5. Program the PHY internal PLL registers over PHY message bus for the desired
+ * frequency and protocol type
+ */
+ intel_lt_phy_program_pll(encoder, crtc_state);
+
+ /* 6. Use the P2P transaction flow */
+ /*
+ * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message
+ * bus for Owned PHY Lanes.
+ */
+ /*
+ * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR
+ * register at offset 0xC00 for Owned PHY Lanes*.
+ */
+ /* 6.3. Clear P2P transaction Ready bit. */
+ intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR,
+ LT_PHY_PCLKIN_GATE);
+
+ /* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0), 0,
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
+ * Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
+ crtc_state->port_clock);
+
+ /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_US, 2, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack assertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_US);
+
+ /*
+ * 13. Ungate the forward clock by setting
+ * PORT_CLOCK_CTL[Forward Clock Ungate] = 1.
+ */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ /*
+ * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over
+ * PHY message bus for Owned PHY Lanes.
+ */
+ rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE);
+ rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE;
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ rate_update, MB_WRITE_COMMITTED);
+
+ /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status, lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 2, NULL))
+ drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
+ phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+
+ /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ } else {
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock);
+ }
+
+ /*
+ * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change.
+ * We handle this step in bxt_set_cdclk()
+ */
+ /* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P0_STATE_ACTIVE);
+
+ intel_lt_phy_enable_disable_tx(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PIPE_RESET(0) |
+ XELPDP_LANE_PIPE_RESET(1))
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
+ lane_pipe_reset);
+
+ /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ lane_phy_current_status,
+ XE3PLPD_RESET_START_LATENCY_US, 0, NULL))
+ drm_warn(display->drm,
+ "PHY %c failed to reset Lane after %dms.\n",
+ phy_name(phy), XE3PLPD_RESET_START_LATENCY_US);
+
+ /* 4. Clear for PHY pulse status on owned PHY lanes. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /*
+ * 5. Follow the Display Voltage Frequency Switching -
+ * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 7. Program DDI_CLK_VALFREQ to 0. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0), 0,
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
+ drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
+ phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching -
+ * Sequence After Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0);
+
+ /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0);
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_ddi_buf_trans *trans;
+ u8 owned_lane_mask;
+ intel_wakeref_t wakeref;
+ int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(display->drm, !trans)) {
+ intel_lt_phy_transaction_end(encoder, wakeref);
+ return;
+ }
+
+ for (ln = 0; ln < crtc_state->lane_count; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+ int lane = ln / 2;
+ int tx = ln % 2;
+ u8 lane_mask = lane == 0 ? INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1;
+
+ if (!(lane_mask & owned_lane_mask))
+ continue;
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx),
+ LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK,
+ LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) |
+ LT_PHY_TX_SWING(trans->entries[level].lt.txswing),
+ MB_WRITE_COMMITTED);
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor),
+ MB_WRITE_COMMITTED);
+ }
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state)
+{
+ int i, j;
+
+ drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
+ for (i = 0; i < 3; i++) {
+ drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
+ i, hw_state->config[i]);
+ }
+
+ for (i = 0; i <= 12; i++)
+ for (j = 3; j >= 0; j--)
+ drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
+ i, j, hw_state->data[i][j]);
+}
+
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ if (memcmp(&a->config, &b->config, sizeof(a->config)) != 0)
+ return false;
+
+ if (memcmp(&a->data, &b->data, sizeof(a->data)) != 0)
+ return false;
+
+ return true;
+}
+
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state)
+{
+ u8 owned_lane_mask;
+ u8 lane;
+ intel_wakeref_t wakeref;
+ int i, j, k;
+
+ pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
+ if (pll_state->tbt_mode)
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG);
+ pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
+ pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG);
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ pll_state->data[i][k] =
+ intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j));
+ }
+
+ pll_state->clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_digital_port *dig_port;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+ struct intel_lt_phy_pll_state pll_hw_state = {};
+ const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
+ int clock;
+ int i, j;
+
+ if (DISPLAY_VER(display) < 35)
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state);
+ clock = intel_lt_phy_calc_port_clock(encoder, new_crtc_state);
+
+ dig_port = enc_to_dig_port(encoder);
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock,
+ "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ pll_sw_state->clock, pll_hw_state.clock);
+
+ for (i = 0; i < 3; i++) {
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ pll_sw_state->config[i], pll_hw_state.config[i]);
+ }
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3; j >= 0; j--)
+ INTEL_DISPLAY_STATE_WARN(display,
+ pll_hw_state.data[i][j] !=
+ pll_sw_state->data[i][j],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i, j,
+ pll_sw_state->data[i][j], pll_hw_state.data[i][j]);
+ }
+}
+
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_enable(encoder, crtc_state);
+ else
+ intel_lt_phy_pll_enable(encoder, crtc_state);
+}
+
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_disable(encoder);
+ else
+ intel_lt_phy_pll_disable(encoder);
+}
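A quick numeric check of the HDMI back-calculation in intel_lt_phy_calc_hdmi_port_clock(), using the xe3plpd_lt_hdmi_5p94 table entry from this patch (register values decoded from its .data[] rows with the same REGVAL() packing as above; a sketch for illustration only, not part of the patch):

	/*
	 * pll_reg_3  = REGVAL(1) = 0x009526a0 -> m2div_int  = bits 14:5 = 309
	 * pll_reg_5  = REGVAL(2) = 0x60000000 -> m2div_frac = 0x60000000 / 2^32 = 0.375
	 * pll_reg_57 = REGVAL(3) = 0x08048128 -> d8         = bits 14:7 = 2
	 *
	 * clk = (m2div_int + m2div_frac) * REF_CLK_KHZ / (d8 * 10)
	 *     = 309.375 * 38400 / 20
	 *     = 594000 kHz, matching the .clock field of that table entry.
	 */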
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
new file mode 100644
index 000000000000..a538d4c69210
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_H__
+#define __INTEL_LT_PHY_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_display;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_crtc;
+struct intel_lt_phy_pll_state;
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state);
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b);
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state);
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
+
+#define HAS_LT_PHY(display) (DISPLAY_VER(display) >= 35)
+
+#endif /* __INTEL_LT_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
new file mode 100644
index 000000000000..9223487d764e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_REGS_H__
+#define __INTEL_LT_PHY_REGS_H__
+
+#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
+#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 1
+#define XE3PLPD_MACCLK_TURNON_LATENCY_US 21
+#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
+#define XE3PLPD_RATE_CALIB_DONE_LATENCY_US 50
+#define XE3PLPD_RESET_START_LATENCY_US 10
+#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
+#define XE3PLPD_RESET_END_LATENCY_US 200
+
+/* LT Phy MAC Register */
+#define LT_PHY_MAC_VDR _MMIO(0xC00)
+#define LT_PHY_PCLKIN_GATE REG_BIT8(0)
+
+/* LT Phy Pipe Spec Registers */
+#define LT_PHY_TXY_CTL8(idx) (0x408 + (0x200 * (idx)))
+#define LT_PHY_TX_SWING_LEVEL_MASK REG_GENMASK8(7, 4)
+#define LT_PHY_TX_SWING_LEVEL(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_LEVEL_MASK, val)
+#define LT_PHY_TX_SWING_MASK REG_BIT8(3)
+#define LT_PHY_TX_SWING(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_MASK, val)
+
+#define LT_PHY_TXY_CTL2(idx) (0x402 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL3(idx) (0x403 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL4(idx) (0x404 + (0x200 * (idx)))
+#define LT_PHY_TX_CURSOR_MASK REG_GENMASK8(5, 0)
+#define LT_PHY_TX_CURSOR(val) REG_FIELD_PREP8(LT_PHY_TX_CURSOR_MASK, val)
+
+#define LT_PHY_TXY_CTL10(idx) (0x40A + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL10_MAC(idx) _MMIO(LT_PHY_TXY_CTL10(idx))
+#define LT_PHY_TX_LANE_ENABLE REG_BIT8(0)
+
+/* LT Phy Vendor Register */
+#define LT_PHY_VDR_0_CONFIG 0xC02
+#define LT_PHY_VDR_DP_PLL_ENABLE REG_BIT(7)
+#define LT_PHY_VDR_1_CONFIG 0xC03
+#define LT_PHY_VDR_RATE_ENCODING_MASK REG_GENMASK8(6, 3)
+#define LT_PHY_VDR_MODE_ENCODING_MASK REG_GENMASK8(2, 0)
+#define LT_PHY_VDR_2_CONFIG 0xCC3
+
+#define LT_PHY_VDR_X_ADDR_MSB(idx) (0xC04 + 0x6 * (idx))
+#define LT_PHY_VDR_X_ADDR_LSB(idx) (0xC05 + 0x6 * (idx))
+
+#define LT_PHY_VDR_X_DATAY(idx, y) ((0xC06 + (3 - (y))) + 0x6 * (idx))
+
+#define LT_PHY_RATE_UPDATE 0xCC4
+#define LT_PHY_RATE_CONTROL_VDR_UPDATE REG_BIT8(0)
+
+#define _XE3PLPD_PORT_BUF_CTL5(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2) \
+ + 0x34)
+#define XE3PLPD_PORT_BUF_CTL5(port) _XE3PLPD_PORT_BUF_CTL5(__xe2lpd_port_idx(port))
+#define XE3PLPD_MACCLK_RESET_0 REG_BIT(11)
+#define XE3PLPD_MACCLK_RATE_MASK REG_GENMASK(4, 0)
+#define XE3PLPD_MACCLK_RATE_DEF REG_FIELD_PREP(XE3PLPD_MACCLK_RATE_MASK, 0x1F)
+
+#define _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) \
+ + 0x60 + (lane) * 0x4)
+#define XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(port, lane) _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(__xe2lpd_port_idx(port), \
+ lane)
+#define XE3LPD_PORT_P2M_ADDR_MASK REG_GENMASK(11, 0)
+#endif /* __INTEL_LT_PHY_REGS_H__ */
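As an aside, a minimal plain-C sketch (not part of the patch) of what REG_GENMASK()/REG_FIELD_PREP() style helpers reduce to for a field such as XE3PLPD_MACCLK_RATE_MASK above (bits 4:0), assuming the usual kernel bitfield semantics:

#include <stdint.h>

static inline uint32_t genmask(unsigned int h, unsigned int l)
{
	return ((~0u) >> (31 - h)) & ((~0u) << l);	/* genmask(4, 0) == 0x1f */
}

static inline uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;	/* shift val into the field */
}

/* field_prep(genmask(4, 0), 0x1f) == 0x1f, the XE3PLPD_MACCLK_RATE_DEF value */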
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 8415f3d703ed..0dcb0597879a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -19,6 +19,7 @@
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_dbuf_bw.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -176,6 +177,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
intel_cdclk_crtc_disable_noatomic(crtc);
skl_wm_crtc_disable_noatomic(crtc);
intel_bw_crtc_disable_noatomic(crtc);
+ intel_dbuf_bw_crtc_disable_noatomic(crtc);
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
@@ -851,18 +853,23 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ crtc_state->plane_min_cdclk[plane->id]);
}
+ crtc_state->min_cdclk = intel_crtc_min_cdclk(crtc_state);
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] min_cdclk %d kHz\n",
+ crtc->base.base.id, crtc->base.name, crtc_state->min_cdclk);
+
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
}
@@ -872,6 +879,7 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
+ intel_dbuf_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index f2f6b9d9afa1..b361a77cd235 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -16,6 +16,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_lt_phy.h"
#include "intel_modeset_verify.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
@@ -246,6 +247,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
+ intel_lt_phy_pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
index 469e8a3cfb49..65359a36df48 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.c
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -5,8 +5,8 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_utils.h"
#include "intel_pch.h"
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -328,7 +328,7 @@ void intel_pch_detect(struct intel_display *display)
"Display disabled, reverting to NOP PCH\n");
display->pch_type = PCH_NOP;
} else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ if (intel_display_run_as_guest(display) && HAS_DISPLAY(display)) {
intel_virt_detect_pch(display, &id, &pch_type);
display->pch_type = pch_type;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9ae53679a041..cca880c7eed4 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -6,10 +6,10 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 68539e7c2a24..6dda496190e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
#include "intel_pfit_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 4faae6a2ae11..505c776c0585 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -46,7 +46,6 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_object.h"
-#include "i915_scheduler_types.h"
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
@@ -293,64 +292,21 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate);
}
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc)
+static void intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return 0;
+ return;
- old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->min_cdclk[plane->id] =
+ new_crtc_state->plane_min_cdclk[plane->id] =
plane->min_cdclk(new_crtc_state, plane_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk for the plane doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- old_crtc_state->min_cdclk[plane->id])
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk for the pipe doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
- return 0;
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- new_crtc_state->min_cdclk[plane->id],
- crtc->base.base.id, crtc->base.name,
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
- *need_cdclk_calc = true;
-
- return 0;
}
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
@@ -436,7 +392,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ crtc_state->plane_min_cdclk[plane->id] = 0;
plane_state->uapi.visible = false;
}
@@ -1173,7 +1129,6 @@ static int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *new_plane_state =
@@ -1222,8 +1177,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
goto unpin_fb;
if (new_plane_state->uapi.fence) {
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
+ i915_gem_fence_wait_priority_display(new_plane_state->uapi.fence);
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
@@ -1747,5 +1701,8 @@ int intel_plane_atomic_check(struct intel_atomic_state *state)
return ret;
}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ intel_plane_calc_min_cdclk(state, plane);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 8af41ccc0a69..4e99df9de3e8 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -69,9 +69,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc);
int intel_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index d806c15db7ce..f52abd4e2eb0 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -7,13 +7,14 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
+#include "intel_display_utils.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 327e0de86f1e..25692a547764 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -10,11 +10,12 @@
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a1ef3e08adcf..05014ffe3ce1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -40,6 +40,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -51,6 +52,7 @@
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
+#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -581,6 +583,44 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
+static enum intel_panel_replay_dsc_support
+compute_pr_dsc_support(struct intel_dp *intel_dp)
+{
+ u8 pr_dsc_mode;
+ u8 val;
+
+ val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
+ pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);
+
+ switch (pr_dsc_mode) {
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
+ return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
+ return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
+ default:
+ MISSING_CASE(pr_dsc_mode);
+ fallthrough;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
+ return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
+ }
+}
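A minimal sketch of the DPCD field extraction above in plain C, standing in for the driver's REG_FIELD_GET8() helper; the mask value used in the example is assumed for illustration and is not taken from the DP spec.

#include <stdint.h>

static inline uint8_t field_get8(uint8_t mask, uint8_t val)
{
	return (uint8_t)((val & mask) >> __builtin_ctz(mask));	/* shift field down to bit 0 */
}

/*
 * With an assumed mask of 0x06 (bits 2:1), a DPCD byte of 0x04 decodes to 2,
 * which the switch above would then map to one of the
 * INTEL_DP_PANEL_REPLAY_DSC_* values.
 */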
+
+static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
+{
+ switch (dsc_support) {
+ case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
+ return "not supported";
+ case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
+ return "full frame only";
+ case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
+ return "selective update";
+ default:
+ MISSING_CASE(dsc_support);
+ return "n/a";
+ }
+}
+
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -616,10 +656,13 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
+ intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp);
+
drm_dbg_kms(display->drm,
- "Panel replay %sis supported by panel\n",
+ "Panel replay %sis supported by panel (in DSC mode: %s)\n",
intel_dp->psr.sink_panel_replay_su_support ?
- "selective_update " : "");
+ "selective_update " : "",
+ panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support));
}
static void _psr_init_dpcd(struct intel_dp *intel_dp)
@@ -957,15 +1000,16 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static int psr2_block_count_lines(struct intel_dp *intel_dp)
+static int
+psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
{
- return intel_dp->alpm_parameters.io_wake_lines < 9 &&
- intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
+ return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
{
- return psr2_block_count_lines(intel_dp) / 4;
+ return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
+ intel_dp->psr.fast_wake_lines) / 4;
}
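As a side note, a standalone sketch of the arithmetic in the two helpers above: the line count is 8 only when both wake-line values are below 9, and the block count is simply that figure divided by 4.

static int block_count_lines(int io_wake_lines, int fast_wake_lines)
{
	return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}

/*
 * block_count_lines(5, 7) == 8   -> block count 8 / 4 == 2
 * block_count_lines(5, 10) == 12 -> block count 12 / 4 == 3
 */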
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
@@ -1060,20 +1104,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[intel_dp->alpm_parameters.io_wake_lines -
+ tmp = map[intel_dp->psr.io_wake_lines -
TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(display) >= 20) {
- val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
+ val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
} else if (DISPLAY_VER(display) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(display) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -1361,22 +1405,54 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
return entry_setup_frames;
}
-static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+static
+int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
- struct intel_display *display = to_intel_display(intel_dp);
- int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
- crtc_state->hw.adjusted_mode.crtc_vblank_start;
- int wake_lines;
+ struct intel_display *display = to_intel_display(crtc_state);
- if (aux_less)
- wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
+
+ if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
+ needs_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
else
- wake_lines = DISPLAY_VER(display) < 20 ?
- psr2_block_count_lines(intel_dp) :
- intel_dp->alpm_parameters.io_wake_lines;
+ return 1;
+}
+
+static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
+ int vblank,
+ int wake_lines)
+{
if (crtc_state->req_psr2_sdp_prior_scanline)
vblank -= 1;
@@ -1387,9 +1463,46 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
return true;
}
+static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
+ crtc_state->hw.adjusted_mode.crtc_vblank_start;
+ int wake_lines;
+ int scl = _intel_psr_min_set_context_latency(crtc_state,
+ needs_panel_replay,
+ needs_sel_update);
+ vblank -= scl;
+
+ if (aux_less)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+
+ /*
+ * Guardband has not been computed yet, so we conservatively check if the
+ * full vblank duration is sufficient to accommodate wake line requirements
+ * for PSR features like Panel Replay and Selective Update.
+ *
+ * Once the actual guardband is available, a more accurate validation is
+ * performed in intel_psr_compute_config_late(), and PSR features are
+ * disabled if wake lines exceed the available guardband.
+ */
+ return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
+}
+
static bool alpm_config_valid(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+ struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1399,7 +1512,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
+ if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
+ needs_panel_replay, needs_sel_update)) {
drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
@@ -1491,7 +1605,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, false))
+ if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
return false;
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -1536,9 +1650,21 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
- !intel_dp->psr.sink_panel_replay_su_support))
- goto unsupported;
+ if (crtc_state->has_panel_replay) {
+ if (DISPLAY_VER(display) < 14)
+ goto unsupported;
+
+ if (!intel_dp->psr.sink_panel_replay_su_support)
+ goto unsupported;
+
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support !=
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
+ drm_dbg_kms(display->drm,
+ "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
+ goto unsupported;
+ }
+ }
if (crtc_state->crc_enabled) {
drm_dbg_kms(display->drm,
@@ -1583,6 +1709,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
+ crtc_state->no_psr_reason = "PSR setup timing not met";
drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
@@ -1593,7 +1720,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
static bool
_panel_replay_compute_config(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1615,6 +1742,14 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it's not supported with DSC\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1642,7 +1777,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, true))
+ if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
return false;
return true;
@@ -1657,15 +1792,40 @@ static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
!crtc_state->has_sel_update);
}
+static
+void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
+
+ /* Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
+}
+
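A plain-C illustration (not from the patch) of the bookkeeping above: the PSR pipe's own bit is cleared from the mask of active pipes, leaving only the other active pipes; the pipe numbering is assumed.

static unsigned int non_psr_pipes(unsigned int active_pipes, unsigned int psr_pipe)
{
	return active_pipes & ~(1u << psr_pipe);
}

/* pipes A..C active (0x7), PSR on pipe A (bit 0): non_psr_pipes(0x7, 0) == 0x6 */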
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_crtc *crtc;
- u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1695,6 +1855,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /* Only used for state verification. */
+ crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support;
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1706,31 +1868,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
-
- /* Wa_18037818876 */
- if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
- crtc_state->has_psr = false;
- drm_dbg_kms(display->drm,
- "PSR disabled to workaround PSR FSM hang issue\n");
- }
-
- /* Rest is for Wa_16025596647 */
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
- return;
-
- /* Not needed by Panel Replay */
- if (crtc_state->has_panel_replay)
- return;
-
- /* We ignore possible secondary PSR/Panel Replay capable eDP */
- for_each_intel_crtc(display->drm, crtc)
- active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
-
- active_pipes = intel_calc_active_pipes(state, active_pipes);
-
- crtc_state->active_non_psr_pipes = active_pipes &
- ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1814,6 +1951,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp);
intel_dp->psr.active = true;
+ intel_dp->psr.no_psr_reason = NULL;
}
/*
@@ -2023,6 +2161,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
crtc_state->req_psr2_sdp_prior_scanline;
intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
+ intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
+ intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2361,50 +2501,17 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
}
/**
- * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
* @crtc_state: the crtc state
*
- * Return minimum vblank delay needed by PSR.
+ * Return minimum SCL lines/delay needed by PSR.
*/
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- if (!crtc_state->has_psr)
- return 0;
-
- /* Wa_14015401596 */
- if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
- return 1;
-
- /* Rest is for SRD_STATUS needed on LunarLake and onwards */
- if (DISPLAY_VER(display) < 20)
- return 0;
-
- /*
- * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
- *
- * To deterministically capture the transition of the state machine
- * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
- * one line after the non-delayed V. Blank.
- *
- * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
- * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
- * - TRANS_VTOTAL[ Vertical Active ])
- *
- * SRD_STATUS is used only by PSR1 on PantherLake.
- * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
- */
-
- if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
- crtc_state->has_sel_update))
- return 0;
- else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
- intel_crtc_has_type(crtc_state,
- INTEL_OUTPUT_EDP)))
- return 0;
- else
- return 1;
+ return _intel_psr_min_set_context_latency(crtc_state,
+ crtc_state->has_panel_replay,
+ crtc_state->has_sel_update);
}
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
@@ -2926,6 +3033,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
+ if (!new_crtc_state->has_psr)
+ psr->no_psr_reason = new_crtc_state->no_psr_reason;
+
if (psr->enabled) {
/*
* Reasons to disable:
@@ -2952,6 +3062,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
}
}
+static void
+verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_panel_replay)
+ return;
+
+ drm_WARN_ON(display->drm,
+ intel_dsc_enabled_on_link(crtc_state) &&
+ crtc_state->panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
+}
+
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2963,6 +3087,8 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
if (!crtc_state->has_psr)
return;
+ verify_panel_replay_dsc_state(crtc_state);
+
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2974,12 +3100,19 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
drm_WARN_ON(display->drm,
psr->enabled && !crtc_state->active_planes);
- keep_disabled |= psr->sink_not_reliable;
- keep_disabled |= !crtc_state->active_planes;
+ if (psr->sink_not_reliable)
+ keep_disabled = true;
+
+ if (!crtc_state->active_planes) {
+ psr->no_psr_reason = "All planes inactive";
+ keep_disabled = true;
+ }
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(display) < 11 &&
- crtc_state->wm_level_disabled;
+ if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
+ psr->no_psr_reason = "Workaround #1136 for skl, bxt";
+ keep_disabled = true;
+ }
if (!psr->enabled && !keep_disabled)
intel_psr_enable_locked(intel_dp, crtc_state);
@@ -3992,6 +4125,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
+ seq_printf(m, ", Panel Replay DSC support = %s",
+ panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support));
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
@@ -4026,6 +4161,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
region_et = "";
seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
+ if (psr->no_psr_reason)
+ seq_printf(m, " %s\n", psr->no_psr_reason);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -4323,3 +4460,84 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
{
return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
}
+
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = intel_crtc_vblank_length(crtc_state);
+ int wake_lines;
+
+ if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (intel_psr_needs_alpm(intel_dp, crtc_state))
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ wake_lines = 0;
+
+ /*
+ * Disable the PSR features if wake lines exceed the available vblank.
+ * Though SCL is computed based on these PSR features, it is not reset
+ * even if the PSR features are disabled to avoid changing vblank start
+ * at this stage.
+ */
+ if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
+ drm_dbg_kms(display->drm,
+ "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
+ wake_lines);
+
+ if (crtc_state->has_panel_replay) {
+ crtc_state->has_panel_replay = false;
+ /*
+ * TODO: Add fallback to PSR/PSR2
+ * Since panel replay cannot be supported, we can fall back to PSR/PSR2.
+ * This will require calling compute_config for psr and psr2 with check for
+ * actual guardband instead of vblank_length.
+ */
+ crtc_state->has_psr = false;
+ }
+
+ crtc_state->has_sel_update = false;
+ crtc_state->enable_psr2_su_region_et = false;
+ crtc_state->enable_psr2_sel_fetch = false;
+ }
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
+
+ intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
+}
+
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int psr_min_guardband;
+ int wake_lines;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return 0;
+
+ if (crtc_state->has_panel_replay)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (crtc_state->has_sel_update)
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ return 0;
+
+ psr_min_guardband = wake_lines + crtc_state->set_context_latency;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
+ psr_min_guardband++;
+
+ return psr_min_guardband;
+}
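A worked sketch of the formula above with made-up numbers: the minimum guardband is the wake-line count plus the set context latency, plus one extra line when the PSR2 SDP has to go out prior to the scanline.

static int psr_min_guardband_example(int wake_lines, int scl, int sdp_prior_scanline)
{
	int guardband = wake_lines + scl;

	if (sdp_prior_scanline)
		guardband++;

	return guardband;	/* e.g. 7 + 1 + 1 == 9 lines */
}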
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 077751aa599f..620b35928832 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -77,11 +77,14 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 600c815e37e4..c05d4beb91d8 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -5,7 +5,7 @@
#include <drm/display/drm_dsc.h>
-#include "i915_utils.h"
+#include "intel_display_utils.h"
#include "intel_qp_tables.h"
/* from BPP 6 to 24 in steps of 0.5 */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 7fe6b4a18213..a201edceee10 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -332,6 +332,8 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6
c10_curve_1, c10_curve_2, prescaler_divider,
&pll_params);
+ pll_state->clock = pixel_clock;
+
pll_state->tx = 0x10;
pll_state->cmn = 0x1;
pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b2dd69a11124..4f028e6a91cd 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -7,12 +7,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
#include "intel_snps_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 75bbaa923204..60f1d9ed181e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -39,10 +39,10 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_plane.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c4a5601c5107..7e17ca018748 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -18,6 +17,7 @@
#include "intel_display_power_map.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
@@ -1703,6 +1703,19 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
mutex_unlock(&tc->lock);
}
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ intel_tc_port_lock(dig_port);
+ drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n",
+ tc->port_name,
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
+ intel_tc_port_unlock(dig_port);
+}
+
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index fff8b96e4972..6719aea5bd58 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_printer;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@@ -113,4 +114,6 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port);
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port);
+
#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index e1260c8fc527..671f357c6563 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -9,12 +9,13 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
@@ -682,7 +683,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ vblank_delay = crtc_state->set_context_latency;
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
@@ -768,3 +769,13 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
return scanline;
}
+
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable)
+ return crtc_state->vrr.guardband;
+ else
+ return adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
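A standalone sketch of the helper above, with illustrative timings: with the VRR timing generator the usable vblank length is the guardband, otherwise it is the distance from vblank start to vtotal.

static int vblank_length(int vrr_enabled, int guardband,
			 int crtc_vtotal, int crtc_vblank_start)
{
	return vrr_enabled ? guardband : crtc_vtotal - crtc_vblank_start;
}

/* vblank_length(0, 0, 1125, 1084) == 41 lines; vblank_length(1, 30, 1125, 1084) == 30 */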
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 21fbb08d61d5..98d04cacd65f 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -48,4 +48,6 @@ const struct intel_crtc_state *
intel_pre_commit_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 8e799e225af1..0e727fc5e80c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -11,10 +11,10 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
@@ -372,6 +372,22 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
return 0;
}
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->dsc.compression_enabled_on_link = true;
+ crtc_state->dsc.compression_enable = true;
+}
+
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ drm_WARN_ON(display->drm, crtc_state->dsc.compression_enable &&
+ !crtc_state->dsc.compression_enabled_on_link);
+
+ return crtc_state->dsc.compression_enabled_on_link;
+}
+
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
@@ -1077,3 +1093,11 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
+
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ return 0x18000; /* 1.5 in 16.16 fixed point */
+}
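Assuming the constant above is a 16.16 fixed point value (which the 1.5 comment suggests), a quick standalone check of the encoding:

static unsigned int lines_to_fixed_16_16(double lines)
{
	return (unsigned int)(lines * 65536);	/* lines_to_fixed_16_16(1.5) == 0x18000 */
}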
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 9e2812f99dd7..99f64ac54b27 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -20,6 +20,8 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state);
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
@@ -32,5 +34,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 3eed37f271b0..00cbc126fb36 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -10,8 +10,11 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_psr.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
+#include "skl_prefill.h"
+#include "skl_watermark.h"
#define FIXED_POINT_PRECISION 100
#define CMRR_PRECISION_TOLERANCE 10
@@ -22,6 +25,9 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
const struct drm_display_info *info = &connector->base.display_info;
struct intel_dp *intel_dp;
+ if (!HAS_VRR(display))
+ return false;
+
/*
* DP Sink is capable of VRR video timings if
* Ignore MSA bit is set in DPCD.
@@ -46,8 +52,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
}
- return HAS_VRR(display) &&
- info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+ return info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
@@ -79,44 +84,42 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
}
}
-static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->hw.adjusted_mode.crtc_vblank_start -
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
-}
-
static int intel_vrr_extra_vblank_delay(struct intel_display *display)
{
/*
* On ICL/TGL VRR hardware inserts one extra scanline
* just after vactive, which pushes the vmin decision
- * boundary ahead accordingly. We'll include the extra
- * scanline in our vblank delay estimates to make sure
- * that we never underestimate how long we have until
- * the delayed vblank has passed.
+ * boundary ahead accordingly, and thus reduces the
+ * max guardband length by one scanline.
*/
return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_vmin_flipline_offset(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_real_vblank_delay(crtc_state) +
- intel_vrr_extra_vblank_delay(display);
+ /*
+ * ICL/TGL hardware imposes flipline>=vmin+1
+ *
+ * We reduce the vmin value to compensate when programming the
+ * hardware. This approach allows flipline to remain set at the
+ * original value, and thus the frame will have the desired
+ * minimum vtotal.
+ */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-static int intel_vrr_flipline_offset(struct intel_display *display)
+static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state,
+ int guardband)
{
- /* ICL/TGL hardware imposes flipline>=vmin+1 */
- return DISPLAY_VER(display) < 13 ? 1 : 0;
+ /* hardware imposes one extra scanline somewhere */
+ return guardband - crtc_state->framestart_delay - 1;
}
-static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state,
+ int pipeline_full)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display);
+ /* hardware imposes one extra scanline somewhere */
+ return pipeline_full + crtc_state->framestart_delay + 1;
}
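A quick standalone check that the two conversions above are inverses of each other for a given framestart_delay; the numbers are illustrative.

static int guardband_to_pipeline_full(int guardband, int framestart_delay)
{
	return guardband - framestart_delay - 1;
}

static int pipeline_full_to_guardband(int pipeline_full, int framestart_delay)
{
	return pipeline_full + framestart_delay + 1;
}

/* with framestart_delay == 1: guardband 10 -> pipeline_full 8 -> guardband 10 */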
/*
@@ -135,48 +138,26 @@ static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
*
* framestart_delay is programmable 1-4.
*/
-static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.guardband;
- else
- /* hardware imposes one extra scanline somewhere */
- return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
-}
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
/* Min vblank actually determined by flipline */
- if (DISPLAY_VER(display) >= 13)
- return intel_vrr_vmin_flipline(crtc_state);
- else
- return intel_vrr_vmin_flipline(crtc_state) +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmin;
}
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.vmax;
- else
- return crtc_state->vrr.vmax +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmax;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmin_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmax_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
static bool
@@ -230,7 +211,6 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
static
void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
{
- crtc_state->cmrr.enable = true;
/*
* TODO: Compute precise target refresh rate to determine
* if video_mode_required should be true. Currently set to
@@ -240,52 +220,76 @@ void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
crtc_state->vrr.vmin = crtc_state->vrr.vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
+ crtc_state->cmrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
static
-void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state,
+ int vmin, int vmax)
{
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.vmin = vmin;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
-/*
- * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
- * Vtotal value.
- */
static
-int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /* For fixed rr, vmin = vmax = flipline */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+}
+
+static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state,
+ int value)
{
struct intel_display *display = to_intel_display(crtc_state);
- int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ /*
+ * On ICL/TGL vmin/vmax/flipline also need to be
+ * adjusted by the SCL to maintain correct vtotals.
+ */
if (DISPLAY_VER(display) >= 13)
- return crtc_vtotal;
+ return value;
else
- return crtc_vtotal -
- intel_vrr_real_vblank_delay(crtc_state);
+ return value - crtc_state->set_context_latency;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal);
}
static
-int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
static
-int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- return intel_vrr_fixed_rr_vtotal(crtc_state) -
- intel_vrr_flipline_offset(display);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state) -
+ intel_vrr_vmin_flipline_offset(display);
}
static
-int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
@@ -297,22 +301,11 @@ void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- intel_vrr_fixed_rr_flipline(crtc_state) - 1);
-}
-
-static
-void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
-{
- /*
- * For fixed rr, vmin = vmax = flipline.
- * vmin is already set to crtc_vtotal set vmax and flipline the same.
- */
- crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
- crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1);
}
static
@@ -384,60 +377,131 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
vmax = vmin;
}
- crtc_state->vrr.vmin = vmin;
- crtc_state->vrr.vmax = vmax;
-
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
-
if (crtc_state->uapi.vrr_enabled && vmin < vmax)
- intel_vrr_compute_vrr_timings(crtc_state);
+ intel_vrr_compute_vrr_timings(crtc_state, vmin, vmax);
else if (is_cmrr_frac_required(crtc_state) && is_edp)
intel_vrr_compute_cmrr_timings(crtc_state);
else
intel_vrr_compute_fixed_rr_timings(crtc_state);
- /*
- * flipline determines the min vblank length the hardware will
- * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
- * vmin by one to make sure we can get the actual min vblank length.
- */
- crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->hw.adjusted_mode.crtc_vsync_start);
crtc_state->vrr.vsync_end =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_end);
+ crtc_state->hw.adjusted_mode.crtc_vsync_end);
}
}
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
+static int
+intel_vrr_max_hw_guardband(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int max_pipeline_full = REG_FIELD_MAX(VRR_CTL_PIPELINE_FULL_MASK);
+
+ if (DISPLAY_VER(display) >= 13)
+ return REG_FIELD_MAX(XELPD_VRR_CTL_VRR_GUARDBAND_MASK);
+ else
+ return intel_vrr_pipeline_full_to_guardband(crtc_state,
+ max_pipeline_full);
+}
+
+static int
+intel_vrr_max_vblank_guardband(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ return crtc_state->vrr.vmin -
+ adjusted_mode->crtc_vdisplay -
+ crtc_state->set_context_latency -
+ intel_vrr_extra_vblank_delay(display);
+}
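A worked example of the bound above with illustrative timings; on pre-Xe_LPD (DISPLAY_VER < 13) hardware the extra inserted scanline is subtracted as well.

static int max_vblank_guardband(int vmin, int vdisplay, int scl, int extra_delay)
{
	return vmin - vdisplay - scl - extra_delay;	/* 1125 - 1080 - 1 - 1 == 43 lines */
}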
+
+static int
+intel_vrr_max_guardband(struct intel_crtc_state *crtc_state)
+{
+ return min(intel_vrr_max_hw_guardband(crtc_state),
+ intel_vrr_max_vblank_guardband(crtc_state));
+}
+
+static
+int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct skl_prefill_ctx prefill_ctx;
+ int prefill_latency_us;
+ int guardband = 0;
+
+ skl_prefill_init_worst(&prefill_ctx, crtc_state);
+
+ /*
+	 * SAGV and package C states are mutually exclusive in the SoC power controller,
+	 * so the max of the package C and SAGV latencies is used to compute the
+	 * minimum prefill guardband:
+	 * PM delay = max(sagv_latency, pkgc_max_latency (highest enabled wm level 1 and up))
+ */
+ prefill_latency_us = max(display->sagv.block_time_us,
+ skl_watermark_max_latency(display, 1));
+
+ guardband = skl_prefill_min_guardband(&prefill_ctx,
+ crtc_state,
+ prefill_latency_us);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ guardband = max(guardband, intel_psr_min_guardband(crtc_state));
+ guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
+ }
+
+ return guardband;
+}
+
+static bool intel_vrr_use_optimized_guardband(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * #TODO: Enable optimized guardband for HDMI
+	 * For HDMI, a lot of infoframes are transmitted a line or two after vsync.
+	 * Since with the optimized guardband the double buffering point is at the
+	 * delayed vblank, we need to ensure that vsync happens after the delayed
+	 * vblank for the HDMI case.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return false;
+
+ return true;
+}
+
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int guardband;
+
if (!intel_vrr_possible(crtc_state))
return;
- if (DISPLAY_VER(display) >= 13) {
- crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start;
- } else {
- /* hardware imposes one extra scanline somewhere */
- crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
- crtc_state->framestart_delay - 1);
+ if (intel_vrr_use_optimized_guardband(crtc_state))
+ guardband = intel_vrr_compute_optimized_guardband(crtc_state);
+ else
+ guardband = crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+
+ crtc_state->vrr.guardband = min(guardband, intel_vrr_max_guardband(crtc_state));
+ if (intel_vrr_always_use_vrr_tg(display)) {
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vtotal - crtc_state->vrr.guardband;
/*
- * vmin/vmax/flipline also need to be adjusted by
- * the vblank delay to maintain correct vtotals.
+ * pipe_mode has already been derived from the
+ * original adjusted_mode, keep the two in sync.
*/
- crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state);
+ pipe_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vblank_start;
}
+
+ if (DISPLAY_VER(display) < 13)
+ crtc_state->vrr.pipeline_full =
+ intel_vrr_guardband_to_pipeline_full(crtc_state,
+ crtc_state->vrr.guardband);
}
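
As a minimal sketch of the guardband selection above (all numbers are assumed purely for illustration; intel_vrr_max_guardband() additionally subtracts set_context_latency and any extra vblank delay), the chosen value is the smaller of the prefill-derived guardband and the hardware/vblank limits:

/* Standalone illustration, not driver code: assumed 1080p-ish timings. */
#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int vmin = 1125, vdisplay = 1080;	/* assumed timings */
	int optimized = 28;			/* assumed prefill-derived guardband */
	int max_hw = 255;			/* assumed register field limit */
	int max_vblank = vmin - vdisplay;	/* 45 lines of vblank available */
	int fallback = vmin - vdisplay;		/* non-optimized case uses the whole vblank */

	int guardband = min_int(optimized, min_int(max_hw, max_vblank));

	printf("optimized=%d fallback=%d -> guardband=%d\n",
	       optimized, fallback, guardband);	/* prints guardband=28 */
	return 0;
}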
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
@@ -461,6 +525,9 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ if (!HAS_VRR(display))
+ return;
+
/*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
@@ -489,7 +556,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
intel_vrr_set_fixed_rr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ if (!intel_vrr_always_use_vrr_tg(display))
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(crtc_state));
@@ -498,6 +565,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
TRANS_VRR_VSYNC(display, cpu_transcoder),
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
+ /*
+	 * From BMG and LNL+ onwards, EMP_AS_SDP_TL is used to program the
+	 * double buffering point and transmission line for VRR packets for
+	 * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+	 * Since we currently support VRR only for DP/eDP, this is programmed
+	 * to transmit the Adaptive Sync SDP at Vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
}
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -576,126 +655,128 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
return false;
}
-static
-void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /*
- * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming
- * double buffering point and transmission line for VRR packets for
- * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
- * Since currently we support VRR only for DP/eDP, so this is programmed
- * to for Adaptive Sync SDP to Vsync start.
- */
- if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
- intel_de_write(display,
- EMP_AS_SDP_TL(display, cpu_transcoder),
- EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) -
+ intel_vrr_vmin_flipline_offset(display);
}
-void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax);
+}
+
+static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline);
+}
+
+static void intel_vrr_set_vrr_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
- return;
-
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
+ intel_vrr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
+ intel_vrr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_hw_flipline(crtc_state) - 1);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
+ bool cmrr_enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 vrr_ctl;
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
- }
- }
+ vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);
+
+ /*
+ * FIXME this might be broken as bspec seems to imply that
+ * even VRR_CTL_CMRR_ENABLE is armed by TRANS_CMRR_N_HI
+ * when enabling CMRR (but not when disabling CMRR?).
+ */
+ if (cmrr_enable)
+ vrr_ctl |= VRR_CTL_CMRR_ENABLE;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), vrr_ctl);
}
-void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- if (!old_crtc_state->vrr.enable)
- return;
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
- }
+ if (intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000))
+ drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
- intel_vrr_set_fixed_rr_timings(old_crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
-void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!HAS_VRR(display))
+ if (!crtc_state->vrr.enable)
return;
- if (!intel_vrr_possible(crtc_state))
- return;
+ intel_vrr_set_vrr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- return;
- }
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, crtc_state->cmrr.enable);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ if (!old_crtc_state->vrr.enable)
+ return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
}
-void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- if (!HAS_VRR(display))
- return;
if (!intel_vrr_possible(crtc_state))
return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, false);
+}
- intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ if (!intel_vrr_possible(old_crtc_state))
+ return;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
}
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->vrr.flipline &&
crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
- crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+ crtc_state->vrr.flipline == crtc_state->vrr.vmin;
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
@@ -720,14 +801,20 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
TRANS_CMRR_M_HI(display, cpu_transcoder));
}
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
- else
- if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ } else {
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) {
crtc_state->vrr.pipeline_full =
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ crtc_state->vrr.guardband =
+ intel_vrr_pipeline_full_to_guardband(crtc_state,
+ crtc_state->vrr.pipeline_full);
+ }
+ }
+
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
crtc_state->vrr.flipline = intel_de_read(display,
TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
@@ -736,6 +823,15 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ if (DISPLAY_VER(display) < 13) {
+ /* undo what intel_vrr_hw_value() does when writing the values */
+ crtc_state->vrr.flipline += crtc_state->set_context_latency;
+ crtc_state->vrr.vmax += crtc_state->set_context_latency;
+ crtc_state->vrr.vmin += crtc_state->set_context_latency;
+
+ crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display);
+ }
+
/*
* For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
* bits are not filled. Since for these platforms TRAN_VMIN is always
@@ -771,4 +867,34 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
*/
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ /*
+ * For platforms that always use the VRR timing generator, we overwrite
+ * crtc_vblank_start with vtotal - guardband to reflect the delayed
+ * vblank start. This works for both default and optimized guardband values.
+ * On other platforms, we keep the original value from
+ * intel_get_transcoder_timings() and apply adjustments only in VRR-specific
+ * paths as needed.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vblank_start =
+ crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->vrr.guardband;
+}
+
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30)
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay -
+ crtc_state->set_context_latency;
+ else
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay;
+}
+
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_vmin_vblank_start(crtc_state) -
+ crtc_state->set_context_latency;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 38bf9996b883..bc9044621635 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -21,7 +21,7 @@ bool intel_vrr_possible(const struct intel_crtc_state *crtc_state);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -35,11 +35,12 @@ int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.c b/drivers/gpu/drm/i915/display/skl_prefill.c
new file mode 100644
index 000000000000..4707c2e7127a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_print.h>
+
+#include "intel_cdclk.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
+#include "skl_watermark.h"
+
+static unsigned int prefill_usecs_to_lines(const struct intel_crtc_state *crtc_state,
+ unsigned int usecs)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, usecs << 16),
+ pipe_mode->crtc_htotal * 1000);
+}
+
+static void prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->prefill.fixed = crtc_state->framestart_delay << 16;
+
+ /* 20 usec for translation walks/etc. */
+ ctx->prefill.fixed += prefill_usecs_to_lines(crtc_state, 20);
+
+ ctx->prefill.dsc = intel_vdsc_prefill_lines(crtc_state);
+}
+
+static void prefill_init_nocdclk_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines_worst(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment_worst(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment_worst(crtc_state);
+}
+
+static void prefill_init_nocdclk(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment(crtc_state);
+}
+
+static unsigned int prefill_adjust(unsigned int value, unsigned int factor)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(value, factor), 0x10000);
+}
+
+static unsigned int prefill_lines_nocdclk(const struct skl_prefill_ctx *ctx)
+{
+ unsigned int prefill = 0;
+
+ prefill += ctx->prefill.dsc;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_2nd);
+
+ prefill += ctx->prefill.scaler_2nd;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_1st);
+
+ prefill += ctx->prefill.scaler_1st;
+ prefill += ctx->prefill.wm0;
+
+ return prefill;
+}
+
+static unsigned int prefill_lines_cdclk(const struct skl_prefill_ctx *ctx)
+{
+ return prefill_adjust(prefill_lines_nocdclk(ctx), ctx->adj.cdclk);
+}
+
+static unsigned int prefill_lines_full(const struct skl_prefill_ctx *ctx)
+{
+ return ctx->prefill.fixed + prefill_lines_cdclk(ctx);
+}
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk_worst(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment_worst(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+static unsigned int prefill_lines_with_latency(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ return ctx->prefill.full + prefill_usecs_to_lines(crtc_state, latency_us);
+}
+
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return DIV_ROUND_UP(prefill, 0x10000);
+}
+
+static unsigned int prefill_guardband(const struct intel_crtc_state *crtc_state)
+{
+ return intel_crtc_vblank_length(crtc_state) << 16;
+}
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int guardband = prefill_guardband(crtc_state);
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return guardband < prefill;
+}
+
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ unsigned int prefill_unadjusted = prefill_lines_nocdclk(ctx);
+ unsigned int prefill_available = prefill_guardband(crtc_state) - ctx->prefill.fixed;
+
+ return intel_cdclk_min_cdclk_for_prefill(crtc_state, prefill_unadjusted,
+ prefill_available);
+}
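
A self-contained sketch of the .16 fixed-point arithmetic used by prefill_usecs_to_lines() and prefill_adjust() above; the pixel clock, htotal and prefill contributions below are assumed example values, not real driver inputs:

/* Standalone illustration of the .16 scanline math, assumed 148.5 MHz / htotal 2200. */
#include <stdint.h>
#include <stdio.h>

#define FP16_ONE 0x10000u

static uint64_t div_round_up_u64(uint64_t a, uint64_t b)
{
	return (a + b - 1) / b;
}

/* usecs -> .16 scanlines, mirroring prefill_usecs_to_lines() */
static unsigned int usecs_to_lines_fp16(unsigned int crtc_clock_khz,
					unsigned int crtc_htotal,
					unsigned int usecs)
{
	return div_round_up_u64((uint64_t)crtc_clock_khz * ((uint64_t)usecs << 16),
				(uint64_t)crtc_htotal * 1000);
}

/* multiply a .16 value by a .16 adjustment factor, as prefill_adjust() does */
static unsigned int adjust_fp16(unsigned int value, unsigned int factor)
{
	return div_round_up_u64((uint64_t)value * factor, FP16_ONE);
}

int main(void)
{
	unsigned int clock = 148500, htotal = 2200;	/* assumed timings (kHz, pixels) */

	unsigned int fixed = usecs_to_lines_fp16(clock, htotal, 20);	/* ~1.35 lines */
	unsigned int wm0 = 10 << 16;					/* assumed WM0 prefill */
	unsigned int scaled = adjust_fp16(wm0, 3 << 16);		/* assumed 3x adjustment */

	/* round the .16 sum up to whole lines, as skl_prefill_min_guardband() does */
	unsigned int guardband = (fixed + scaled + FP16_ONE - 1) >> 16;

	printf("fixed=%u scaled=%u -> guardband=%u lines\n", fixed, scaled, guardband);
	return 0;
}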
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.h b/drivers/gpu/drm/i915/display/skl_prefill.h
new file mode 100644
index 000000000000..028ee19b64ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __SKL_PREFILL_H__
+#define __SKL_PREFILL_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+struct skl_prefill_ctx {
+ /* .16 scanlines */
+ struct {
+ unsigned int fixed;
+ unsigned int wm0;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ unsigned int dsc;
+ unsigned int full;
+ } prefill;
+
+ /* .16 adjustment factors */
+ struct {
+ unsigned int cdclk;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ } adj;
+};
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __SKL_PREFILL_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index c6cccf170ff1..4c4deac7f9c8 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -5,11 +5,13 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
@@ -282,7 +284,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
drm_rect_width(&crtc_state->pipe_src),
drm_rect_height(&crtc_state->pipe_src),
width, height, NULL, 0,
- crtc_state->pch_pfit.enabled);
+ crtc_state->pch_pfit.enabled ||
+ intel_casf_needs_scaler(crtc_state));
}
/**
@@ -321,7 +324,9 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
}
static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
- struct intel_crtc *crtc)
+ struct intel_crtc *crtc,
+ struct intel_plane_state *plane_state,
+ bool casf_scaler)
{
int i;
@@ -329,6 +334,10 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
if (scaler_state->scalers[i].in_use)
continue;
+		/* CASF needs the second scaler */
+ if (!plane_state && casf_scaler && i != 1)
+ continue;
+
scaler_state->scalers[i].in_use = true;
return i;
@@ -379,7 +388,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
- int *scaler_id)
+ int *scaler_id, bool casf_scaler)
{
struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
@@ -388,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int vscale = 0;
if (*scaler_id < 0)
- *scaler_id = intel_allocate_scaler(scaler_state, crtc);
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc, plane_state, casf_scaler);
if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
@@ -520,10 +529,14 @@ static int setup_crtc_scaler(struct intel_atomic_state *state,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ if (intel_casf_needs_scaler(crtc_state) && crtc_state->pch_pfit.enabled)
+ return -EINVAL;
+
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "CRTC", crtc->base.base.id,
- NULL, &scaler_state->scaler_id);
+ NULL, &scaler_state->scaler_id,
+ intel_casf_needs_scaler(crtc_state));
}
static int setup_plane_scaler(struct intel_atomic_state *state,
@@ -558,7 +571,8 @@ static int setup_plane_scaler(struct intel_atomic_state *state,
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "PLANE", plane->base.base.id,
- plane_state, &plane_state->scaler_id);
+ plane_state, &plane_state->scaler_id,
+ false);
}
/**
@@ -738,6 +752,52 @@ static void skl_scaler_setup_filter(struct intel_display *display,
}
}
+#define CASF_SCALER_FILTER_SELECT \
+ (PS_FILTER_PROGRAMMED | \
+ PS_Y_VERT_FILTER_SELECT(0) | \
+ PS_Y_HORZ_FILTER_SELECT(0) | \
+ PS_UV_VERT_FILTER_SELECT(0) | \
+ PS_UV_HORZ_FILTER_SELECT(0))
+
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_rect src, dest;
+ int id, width, height;
+ int x = 0, y = 0;
+ enum pipe pipe = crtc->pipe;
+ u32 ps_ctrl;
+
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+
+ drm_rect_init(&dest, x, y, width, height);
+
+ width = drm_rect_width(&dest);
+ height = drm_rect_height(&dest);
+ id = scaler_state->scaler_id;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
+
+ ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
+ CASF_SCALER_FILTER_SELECT;
+
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
+ PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
+ PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
+}
+
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -921,16 +981,23 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
continue;
id = i;
- crtc_state->pch_pfit.enabled = true;
+
+ /* Read CASF regs for second scaler */
+ if (HAS_CASF(display) && id == 1)
+ intel_casf_sharpness_get_config(crtc_state);
+
+ if (!crtc_state->hw.casf_params.casf_enable)
+ crtc_state->pch_pfit.enabled = true;
pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
+ if (!crtc_state->hw.casf_params.casf_enable)
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
scaler_state->scalers[i].in_use = true;
break;
@@ -968,3 +1035,144 @@ void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
1);
intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
}
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME don't have scalers assigned yet
+ * so can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME don't have scalers assigned yet
+ * so can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 0)
+ return 4 << 16;
+
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 1 && crtc_state->pch_pfit.enabled)
+ return 4 << 16;
+
+ return 0;
+}
+
+static unsigned int _skl_scaler_max_scale(const struct intel_crtc_state *crtc_state,
+ unsigned int max_scale)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * Downscaling requires increasing cdclk, so max scale
+ * factor is limited to the max_dotclock/dotclock ratio.
+ *
+ * FIXME find out the max downscale factors properly
+ */
+ return min(max_scale, DIV_ROUND_UP_ULL((u64)display->cdclk.max_dotclk_freq << 16,
+ crtc_state->hw.pipe_mode.crtc_clock));
+}
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+ if (crtc->num_scalers > 1)
+ max_scale *= 9;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 3 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return 4 << 16;
+ else
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return 4 << 16;
+ else
+ return 0;
+}
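
A toy illustration of the dotclock-based clamp in _skl_scaler_max_scale(): the nominal 9x downscale limit is reduced to max_dotclk/crtc_clock in .16 fixed point. The clock values below are assumptions for the example only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_dotclk = 1350000, crtc_clock = 300000;	/* kHz, assumed */
	uint64_t nominal = 9ull << 16;				/* nominal 9x in .16 */
	uint64_t clamp = ((max_dotclk << 16) + crtc_clock - 1) / crtc_clock;
	uint64_t max_scale = nominal < clamp ? nominal : clamp;

	/* prints "max downscale = 4.50 x" for these assumed clocks */
	printf("max downscale = %llu.%02llu x\n",
	       (unsigned long long)(max_scale >> 16),
	       (unsigned long long)(((max_scale & 0xffff) * 100) >> 16));
	return 0;
}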
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 12a19016c5f6..7e8d819c019d 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -36,6 +36,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state);
+
enum drm_mode_status
skl_scaler_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode,
@@ -45,4 +47,19 @@ skl_scaler_mode_valid(struct intel_display *display,
void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state);
void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 4433655aa308..ba1bf0bd4c55 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -10,13 +10,12 @@
#include <drm/drm_print.h>
#include "pxp/intel_pxp.h"
-#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
@@ -25,6 +24,7 @@
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_step.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
@@ -390,44 +390,19 @@ static int glk_plane_max_width(const struct drm_framebuffer *fb,
}
}
+static int adl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 16 / fb->format->cpp[color_plane];
+}
+
static int icl_plane_min_width(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
{
/* Wa_14011264657, Wa_14011050563: gen11+ */
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- return 18;
- case DRM_FORMAT_RGB565:
- return 10;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- return 6;
- case DRM_FORMAT_NV12:
- return 20;
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return 12;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- return 4;
- default:
- return 1;
- }
+ return 16 / fb->format->cpp[color_plane] + 2;
}
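
As a quick sanity check of the cpp-based rule above (the bytes-per-pixel values are assumed from common drm_fourcc formats, e.g. C8, RGB565, XRGB8888 and fp16), a minimal sketch printing the resulting minimum widths:

#include <stdio.h>

int main(void)
{
	const int cpp[] = { 1, 2, 4, 8 };	/* assumed bytes per pixel */

	for (unsigned int i = 0; i < sizeof(cpp) / sizeof(cpp[0]); i++)
		printf("cpp=%d: adl min width=%d, icl min width=%d\n",
		       cpp[i], 16 / cpp[i], 16 / cpp[i] + 2);
	return 0;
}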
static int xe3_plane_max_width(const struct drm_framebuffer *fb,
@@ -464,6 +439,23 @@ static int skl_plane_max_height(const struct drm_framebuffer *fb,
return 4096;
}
+static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
+{
+ return pipe - PIPE_A + INTEL_FBC_A;
+}
+
+static bool skl_plane_has_fbc(struct intel_display *display,
+ enum intel_fbc_id fbc_id, enum plane_id plane_id)
+{
+ if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
+ return false;
+
+ if (DISPLAY_VER(display) >= 20)
+ return icl_is_hdr_plane(display, plane_id);
+ else
+ return plane_id == PLANE_1;
+}
+
static int icl_plane_max_height(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
@@ -899,6 +891,25 @@ static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
}
+static void x3p_lpd_plane_update_pixel_normalizer(struct intel_dsb *dsb,
+ struct intel_plane *plane,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(plane->pipe);
+ u32 val;
+
+	/* Only HDR planes have a pixel normalizer, and it doesn't matter without FBC */
+ if (!skl_plane_has_fbc(display, fbc_id, plane->id))
+ return;
+
+ val = enable ? PLANE_PIXEL_NORMALIZE_NORM_FACTOR(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0) |
+ PLANE_PIXEL_NORMALIZE_ENABLE : 0;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PIXEL_NORMALIZE(plane->pipe, plane->id), val);
+}
+
static void
icl_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -914,6 +925,10 @@ icl_plane_disable_arm(struct intel_dsb *dsb,
skl_write_plane_wm(dsb, plane, crtc_state);
icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
+
+ if (DISPLAY_VER(display) >= 35)
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, false);
+
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1573,7 +1588,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
}
/* FLAT CCS doesn't need to program AUX_DIST */
- if (!HAS_FLAT_CCS(to_i915(display->drm)) && DISPLAY_VER(display) < 20)
+ if (HAS_AUX_CCS(display))
intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
skl_plane_aux_dist(plane_state, color_plane));
@@ -1644,6 +1659,14 @@ icl_plane_update_arm(struct intel_dsb *dsb,
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
/*
+	 * In order to have FBC for fp16 formats, the pixel normalizer block must
+	 * be active. Check whether the pixel normalizer block needs to be enabled
+	 * for FBC. If needed, enable the block with a normalization factor of 1.0.
+ */
+ if (intel_fbc_is_enable_pixel_normalizer(plane_state))
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, true);
+
+ /*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
@@ -1781,8 +1804,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->hw.enable &&
- crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
fb->modifier != DRM_FORMAT_MOD_LINEAR &&
fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(display->drm,
@@ -1885,6 +1907,14 @@ static int intel_plane_min_width(struct intel_plane *plane,
return 1;
}
+static int intel_plane_min_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 1;
+}
+
static int intel_plane_max_width(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2016,6 +2046,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ int min_height = intel_plane_min_height(plane, fb, 0, rotation);
int max_width = intel_plane_max_width(plane, fb, 0, rotation);
int max_height = intel_plane_max_height(plane, fb, 0, rotation);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
@@ -2023,11 +2054,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height || h < 1) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, min_width, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2087,6 +2118,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
int uv_plane = 1;
int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ?
skl_main_to_aux_plane(fb, uv_plane) : 0;
+ int min_width = intel_plane_min_width(plane, fb, uv_plane, rotation);
+ int min_height = intel_plane_min_height(plane, fb, uv_plane, rotation);
int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
int x = plane_state->uapi.src.x1 >> 17;
@@ -2096,11 +2129,11 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
u32 offset;
/* FIXME not quite sure how/if these apply to the chroma plane */
- if (w > max_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] CbCr source size %dx%d too big (limit %dx%d)\n",
+ "[PLANE:%d:%s] requested CbCr source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2405,23 +2438,6 @@ void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state,
}
}
-static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
-{
- return pipe - PIPE_A + INTEL_FBC_A;
-}
-
-static bool skl_plane_has_fbc(struct intel_display *display,
- enum intel_fbc_id fbc_id, enum plane_id plane_id)
-{
- if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
- return false;
-
- if (DISPLAY_VER(display) >= 20)
- return icl_is_hdr_plane(display, plane_id);
- else
- return plane_id == PLANE_1;
-}
-
static struct intel_fbc *skl_plane_fbc(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2440,13 +2456,10 @@ static bool skl_plane_has_planar(struct intel_display *display,
if (display->platform.skylake || display->platform.broxton)
return false;
- if (DISPLAY_VER(display) == 9 && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_1 && plane_id != PLANE_2)
+ if (pipe == PIPE_C)
return false;
- return true;
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static const u32 *skl_get_plane_formats(struct intel_display *display,
@@ -2462,11 +2475,17 @@ static const u32 *skl_get_plane_formats(struct intel_display *display,
}
}
+static bool glk_plane_has_planar(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
+}
+
static const u32 *glk_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (skl_plane_has_planar(display, pipe, plane_id)) {
+ if (glk_plane_has_planar(display, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(glk_planar_formats);
return glk_planar_formats;
} else {
@@ -2706,8 +2725,10 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
static bool skl_plane_has_rc_ccs(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
- return pipe != PIPE_C &&
- (plane_id == PLANE_1 || plane_id == PLANE_2);
+ if (pipe == PIPE_C)
+ return false;
+
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static u8 skl_plane_caps(struct intel_display *display,
@@ -2835,11 +2856,15 @@ skl_universal_plane_create(struct intel_display *display,
intel_fbc_add_plane(skl_plane_fbc(display, pipe, plane_id), plane);
if (DISPLAY_VER(display) >= 30) {
+ plane->min_width = adl_plane_min_width;
plane->max_width = xe3_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(display) >= 11) {
- plane->min_width = icl_plane_min_width;
+ if (DISPLAY_VER(display) >= 14 || display->platform.alderlake_p)
+ plane->min_width = adl_plane_min_width;
+ else
+ plane->min_width = icl_plane_min_width;
if (icl_is_hdr_plane(display, plane_id))
plane->max_width = icl_hdr_plane_max_width;
else
@@ -2931,7 +2956,7 @@ skl_universal_plane_create(struct intel_display *display,
caps = skl_plane_caps(display, pipe, plane_id);
/* FIXME: xe has problems with AUX */
- if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(to_i915(display->drm)))
+ if (!IS_ENABLED(I915) && HAS_AUX_CCS(display))
caps &= ~(INTEL_PLANE_CAP_CCS_RC |
INTEL_PLANE_CAP_CCS_RC_CC |
INTEL_PLANE_CAP_CCS_MC);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index ca9fdfbbe57c..7c944d3ca855 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -455,4 +455,16 @@
_SEL_FETCH_PLANE_OFFSET_5_A, _SEL_FETCH_PLANE_OFFSET_5_B, \
_SEL_FETCH_PLANE_OFFSET_6_A, _SEL_FETCH_PLANE_OFFSET_6_B)
+#define _PLANE_PIXEL_NORMALIZE_1_A 0x701a8
+#define _PLANE_PIXEL_NORMALIZE_2_A 0x702a8
+#define _PLANE_PIXEL_NORMALIZE_1_B 0x711a8
+#define _PLANE_PIXEL_NORMALIZE_2_B 0x712a8
+#define PLANE_PIXEL_NORMALIZE(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_PIXEL_NORMALIZE_1_A, _PLANE_PIXEL_NORMALIZE_1_B, \
+ _PLANE_PIXEL_NORMALIZE_2_A, _PLANE_PIXEL_NORMALIZE_2_B)
+#define PLANE_PIXEL_NORMALIZE_ENABLE REG_BIT(31)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK REG_GENMASK(15, 0)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR(val) REG_FIELD_PREP(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK, (val))
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0 0x3c00
+
#endif /* __SKL_UNIVERSAL_PLANE_REGS_H__ */
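
The 1.0 normalization factor above (0x3c00) matches the IEEE 754 binary16 encoding of 1.0; treating the register field as a half-precision value is an assumption here, illustrated by this sketch:

/* Sketch: 0x3c00 as binary16 1.0 (sign 0, biased exponent 15, mantissa 0). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sign = 0, exponent = 15 /* bias 15 for 2^0 */, mantissa = 0;
	uint16_t half_one = (uint16_t)((sign << 15) | (exponent << 10) | mantissa);

	printf("0x%04x\n", half_one);	/* prints 0x3c00 */
	return 0;
}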
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index d74cbb43ae6f..6d050408618c 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -10,7 +10,6 @@
#include "soc/intel_dram.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -23,12 +22,16 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
#include "intel_pcode.h"
#include "intel_plane.h"
+#include "intel_vblank.h"
#include "intel_wm.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
@@ -1636,26 +1639,11 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
return ret;
}
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+static int skl_wm_linetime_us(const struct intel_crtc_state *crtc_state,
+ int pixel_rate)
{
- struct intel_display *display = to_intel_display(crtc_state);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(display->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
+ return DIV_ROUND_UP(crtc_state->hw.pipe_mode.crtc_htotal * 1000,
+ pixel_rate);
}
static int
@@ -1743,7 +1731,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
- wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+ wp->linetime_us = skl_wm_linetime_us(crtc_state, plane_pixel_rate);
return 0;
}
@@ -1878,18 +1866,21 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
} else {
blocks++;
}
-
- /*
- * Make sure result blocks for higher latency levels are
- * at least as high as level below the current level.
- * Assumption in DDB algorithm optimization for special
- * cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
}
}
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ *
+	 * Let's always do this as the algorithm can give
+	 * non-monotonic results on any platform.
+ */
+ blocks = max_t(u32, blocks, result_prev->blocks);
+ lines = max_t(u32, lines, result_prev->lines);
+
if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2157,103 +2148,55 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int
-cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state)) {
- drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
- return 1;
- }
-
- return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * intel_cdclk_logical(cdclk_state)));
-}
-
-static int
-dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- u32 dsc_prefill_latency = 0;
-
- if (!crtc_state->dsc.compression_enable ||
- !num_scaler_users ||
- num_scaler_users > crtc->num_scalers)
- return dsc_prefill_latency;
-
- dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
-
- for (int i = 0; i < num_scaler_users; i++) {
- u64 hscale_k, vscale_k;
-
- hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
- vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
- dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
- 1000000);
- }
-
- dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->primary);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ret, pixel_rate, width, level = 0;
+ const struct drm_format_info *info;
+ struct skl_wm_level wm = {};
+ struct skl_wm_params wp;
+ unsigned int latency;
+ u64 modifier;
+ u32 format;
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency);
-}
+ /* only expected to be used for VRR guardband calculation */
+ drm_WARN_ON(display->drm, !HAS_VRR(display));
-static int
-scaler_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int scaler_prefill_latency = 0;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
+ /* FIXME rather ugly to pick this by hand but maybe no better way? */
+ format = DRM_FORMAT_XBGR16161616F;
+ if (HAS_4TILE(display))
+ modifier = I915_FORMAT_MOD_4_TILED;
+ else
+ modifier = I915_FORMAT_MOD_Y_TILED;
- if (!num_scaler_users)
- return scaler_prefill_latency;
+ info = drm_get_format_info(display->drm, format, modifier);
- scaler_prefill_latency = 4 * linetime;
+ pixel_rate = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_total_scale(crtc_state),
+ pipe_mode->crtc_clock),
+ 0x10000);
- if (num_scaler_users > 1) {
- u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
- u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- int latency;
+ /* FIXME limit to max plane width? */
+ width = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_hscale(crtc_state),
+ pipe_mode->crtc_hdisplay),
+ 0x10000);
- latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
- chroma_downscaling_factor), 1000000);
- scaler_prefill_latency += latency;
- }
+ /* FIXME is 90/270 rotation worse than 0/180? */
+ ret = skl_compute_wm_params(crtc_state, width, info,
+ modifier, DRM_MODE_ROTATE_0,
+ pixel_rate, &wp, 0, 1);
+ drm_WARN_ON(display->drm, ret);
- scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ latency = skl_wm_latency(display, level, &wp);
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency);
-}
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
-static bool
-skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
- int wm0_lines, int latency)
-{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ /* FIXME is this sane? */
+ if (wm.min_ddb_alloc == U16_MAX)
+ wm.lines = skl_wm_max_lines(display);
- return crtc_state->framestart_delay +
- intel_usecs_to_scanlines(adjusted_mode, latency) +
- scaler_prefill_latency(crtc_state) +
- dsc_prefill_latency(crtc_state) +
- wm0_lines >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+ return wm.lines << 16;
}
static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
@@ -2272,15 +2215,21 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
return wm0_lines;
}
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ return skl_max_wm0_lines(crtc_state) << 16;
+}
+
/*
* TODO: In case we use PKG_C_LATENCY to allow C-states when the delayed vblank
* size is too small for the package C exit latency we need to notify PSR about
* the scenario to apply Wa_16025596647.
*/
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
- int wm0_lines)
+ const struct skl_prefill_ctx *ctx)
{
struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int level;
for (level = display->wm.num_levels - 1; level >= 0; level--) {
@@ -2295,10 +2244,13 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
if (level == 0)
latency = 0;
- if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ if (!skl_prefill_vblank_too_short(ctx, crtc_state, latency))
return level;
}
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Not enough time in vblank for prefill\n",
+ crtc->base.base.id, crtc->base.name);
+
return -EINVAL;
}
@@ -2306,14 +2258,15 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int wm0_lines, level;
+ struct skl_prefill_ctx ctx;
+ int level;
if (!crtc_state->hw.active)
return 0;
- wm0_lines = skl_max_wm0_lines(crtc_state);
+ skl_prefill_init(&ctx, crtc_state);
- level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ level = skl_max_wm_level_for_vblank(crtc_state, &ctx);
if (level < 0)
return level;
@@ -2323,6 +2276,13 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
*/
crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
+ /*
+ * TODO: assert that we are in fact using the maximum guardband
+ * if we end up disabling any WM levels here. Otherwise we clearly
+ * failed in using a realistic worst case prefill estimate when
+ * determining the guardband size.
+ */
+
for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
@@ -2341,8 +2301,8 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) >= 12 &&
display->sagv.block_time_us &&
- skl_is_vblank_too_short(crtc_state, wm0_lines,
- display->sagv.block_time_us)) {
+ skl_prefill_vblank_too_short(&ctx, crtc_state,
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -3174,12 +3134,53 @@ void skl_watermark_ipc_init(struct intel_display *display)
skl_watermark_ipc_update(display);
}
-static void
-adjust_wm_latency(struct intel_display *display,
- u16 wm[], int num_levels, int read_latency)
+static void multiply_wm_latency(struct intel_display *display, int mult)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] *= mult;
+}
+
+static void increase_wm_latency(struct intel_display *display, int inc)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ wm[0] += inc;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
+ break;
+
+ wm[level] += inc;
+ }
+}
+
+static bool need_16gb_dimm_wa(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
- int i, level;
+
+ return (display->platform.skylake || display->platform.kabylake ||
+ display->platform.coffeelake || display->platform.cometlake ||
+ DISPLAY_VER(display) == 11) && dram_info->has_16gb_dimms;
+}
+
+static int wm_read_latency(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ return 6;
+ else if (DISPLAY_VER(display) >= 12)
+ return 3;
+ else
+ return 2;
+}
+
+static void sanitize_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
@@ -3187,14 +3188,38 @@ adjust_wm_latency(struct intel_display *display,
* of the punit to satisfy this requirement.
*/
for (level = 1; level < num_levels; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i < num_levels; i++)
- wm[i] = 0;
+ if (wm[level] == 0)
+ break;
+ }
+
+ for (level = level + 1; level < num_levels; level++)
+ wm[level] = 0;
+}
- num_levels = level;
+static void make_wm_latency_monotonic(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
break;
- }
+
+ wm[level] = max(wm[level], wm[level-1]);
}
+}
+
+static void
+adjust_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+
+ if (display->platform.dg2)
+ multiply_wm_latency(display, 2);
+
+ sanitize_wm_latency(display);
+
+ make_wm_latency_monotonic(display);
/*
* WaWmMemoryReadLatency
@@ -3203,24 +3228,22 @@ adjust_wm_latency(struct intel_display *display,
* to add proper adjustment to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
- if (wm[0] == 0) {
- for (level = 0; level < num_levels; level++)
- wm[level] += read_latency;
- }
+ if (wm[0] == 0)
+ increase_wm_latency(display, wm_read_latency(display));
/*
- * WA Level-0 adjustment for 16Gb DIMMs: SKL+
+ * WA Level-0 adjustment for 16Gb+ DIMMs: SKL+
* If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get DIMM info assume 16Gb DIMM
+ * any underrun. If not able to get DIMM info assume 16Gb+ DIMM
* to avoid any underrun.
*/
- if (!display->platform.dg2 && dram_info->has_16gb_dimms)
- wm[0] += 1;
+ if (need_16gb_dimm_wa(display))
+ increase_wm_latency(display, 1);
}
-static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
+ u16 *wm = display->wm.skl_latency;
u32 val;
val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
@@ -3234,15 +3257,11 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
-
- adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
- int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
- int mult = display->platform.dg2 ? 2 : 1;
+ u16 *wm = display->wm.skl_latency;
u32 val;
int ret;
@@ -3254,10 +3273,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
@@ -3267,12 +3286,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
-
- adjust_wm_latency(display, wm, num_levels, read_latency);
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
}
static void skl_setup_wm_latency(struct intel_display *display)
@@ -3283,11 +3300,15 @@ static void skl_setup_wm_latency(struct intel_display *display)
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(display, display->wm.skl_latency);
+ mtl_read_wm_latency(display);
else
- skl_read_wm_latency(display, display->wm.skl_latency);
+ skl_read_wm_latency(display);
+
+ intel_print_wm_latency(display, "original", display->wm.skl_latency);
+
+ adjust_wm_latency(display);
- intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "adjusted", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
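The reworked skl_setup_wm_latency() path reads the raw punit values first, prints them, and only then runs adjust_wm_latency(), which is now split into small helpers applied in a fixed order: optional DG2 doubling, zeroing every level above the first 0us level, forcing the values monotonic, adding the platform read latency when level 0 reads back as 0, and the 16Gb DIMM level-0 bump. A minimal standalone sketch of that ordering on a plain array follows; the sample values and the read-latency bump of 6 are illustrative, not hardware data.

#include <stdio.h>

#define NUM_LEVELS 8

/* Zero out every level above the first level that reports 0us. */
static void sanitize(unsigned short wm[])
{
	int level;

	for (level = 1; level < NUM_LEVELS; level++) {
		if (wm[level] == 0)
			break;
	}

	for (level = level + 1; level < NUM_LEVELS; level++)
		wm[level] = 0;
}

/* Make the remaining levels monotonically non-decreasing. */
static void make_monotonic(unsigned short wm[])
{
	int level;

	for (level = 1; level < NUM_LEVELS && wm[level]; level++)
		wm[level] = wm[level] > wm[level - 1] ? wm[level] : wm[level - 1];
}

/* Bump all populated levels, mirroring increase_wm_latency(). */
static void increase(unsigned short wm[], int inc)
{
	int level;

	wm[0] += inc;
	for (level = 1; level < NUM_LEVELS && wm[level]; level++)
		wm[level] += inc;
}

int main(void)
{
	/* Hypothetical raw punit values, chosen to exercise every step. */
	unsigned short wm[NUM_LEVELS] = { 0, 4, 3, 8, 0, 9, 10, 11 };
	int level;

	sanitize(wm);       /* levels 5..7 are dropped because level 4 is 0 */
	make_monotonic(wm); /* level 2 is raised from 3 to 4 */
	if (wm[0] == 0)
		increase(wm, 6); /* read-latency bump, 6 on the newest platforms */

	for (level = 0; level < NUM_LEVELS; level++)
		printf("level %d: %dus\n", level, wm[level]);

	return 0;
}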
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 62790816f030..6bc2ec9164bf 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -79,5 +79,8 @@ void intel_program_dpkgc_latency(struct intel_atomic_state *state);
bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.c b/drivers/gpu/drm/i915/display/vlv_clock.c
new file mode 100644
index 000000000000..1abdae453514
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "vlv_clock.h"
+#include "vlv_sideband.h"
+
+/*
+ * FIXME: The caching of hpll_freq and czclk_freq relies on the first calls
+ * occurring at a time when they can actually be read. This appears to be the
+ * case, but is somewhat fragile. Make the initialization explicit at a point
+ * where they can be reliably read.
+ */
+
+/* returns HPLL frequency in kHz */
+int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ if (!display->vlv_clock.hpll_freq) {
+ vlv_cck_get(drm);
+ /* Obtain SKU information */
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ vlv_cck_put(drm);
+
+ display->vlv_clock.hpll_freq = vco_freq[hpll_freq] * 1000;
+
+ drm_dbg_kms(drm, "HPLL frequency: %d kHz\n", display->vlv_clock.hpll_freq);
+ }
+
+ return display->vlv_clock.hpll_freq;
+}
+
+static int vlv_clock_get_cck(struct drm_device *drm,
+ const char *name, u32 reg, int ref_freq)
+{
+ u32 val;
+ int divider;
+
+ vlv_cck_get(drm);
+ val = vlv_cck_read(drm, reg);
+ vlv_cck_put(drm);
+
+ divider = val & CCK_FREQUENCY_VALUES;
+
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
+
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
+
+int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
+ return vlv_clock_get_cck(drm, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!display->vlv_clock.czclk_freq) {
+ display->vlv_clock.czclk_freq = vlv_clock_get_cck(drm, "czclk", CCK_CZ_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+ drm_dbg_kms(drm, "CZ clock rate: %d kHz\n", display->vlv_clock.czclk_freq);
+ }
+
+ return display->vlv_clock.czclk_freq;
+}
+
+int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "cdclk", CCK_DISPLAY_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "GPLL ref", CCK_GPLL_CLOCK_CONTROL,
+ vlv_clock_get_czclk(drm));
+}
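The new vlv_clock helpers read a value over the CCK sideband once and then cache it in intel_display, as the FIXME above notes. A rough standalone sketch of the same lazy-cache shape; the cache struct and read_hpll_fuse() are stand-ins for illustration, not the real driver API.

#include <stdio.h>

/* Stand-in for the cached field kept in struct intel_display. */
struct vlv_clock_cache {
	int hpll_freq; /* kHz, 0 means "not read yet" */
};

/* Stand-in for the fuse read; the real code reads CCK_FUSE_REG over sideband. */
static int read_hpll_fuse(void)
{
	return 2; /* pretend the SKU fuse selects the 2000 MHz VCO */
}

/* Lazily read and cache the HPLL VCO frequency, in kHz. */
static int get_hpll_vco(struct vlv_clock_cache *cache)
{
	static const int vco_freq[] = { 800, 1600, 2000, 2400 }; /* MHz */

	if (!cache->hpll_freq)
		cache->hpll_freq = vco_freq[read_hpll_fuse()] * 1000;

	return cache->hpll_freq;
}

int main(void)
{
	struct vlv_clock_cache cache = { 0 };

	printf("HPLL VCO: %d kHz\n", get_hpll_vco(&cache)); /* reads the fuse */
	printf("HPLL VCO: %d kHz\n", get_hpll_vco(&cache)); /* served from cache */
	return 0;
}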
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.h b/drivers/gpu/drm/i915/display/vlv_clock.h
new file mode 100644
index 000000000000..5742ed3c628d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __VLV_CLOCK_H__
+#define __VLV_CLOCK_H__
+
+struct drm_device;
+
+#ifdef I915
+int vlv_clock_get_hpll_vco(struct drm_device *drm);
+int vlv_clock_get_hrawclk(struct drm_device *drm);
+int vlv_clock_get_czclk(struct drm_device *drm);
+int vlv_clock_get_cdclk(struct drm_device *drm);
+int vlv_clock_get_gpll(struct drm_device *drm);
+#else
+static inline int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return 0;
+}
+#endif
+
+#endif /* __VLV_CLOCK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index c9a53fde79c4..444682995658 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -34,7 +34,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -42,6 +41,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 848ab8b5dd4a..b057c2fa03a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -143,7 +143,7 @@ enum {
* we want to leave the object where it is and for all the existing relocations
* to match. If the object is given a new address, or if userspace thinks the
* object is elsewhere, we have to parse all the relocation entries and update
- * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that
+ * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
* all the target addresses in all of its objects match the value in the
* relocation entries and that they all match the presumed offsets given by the
* list of execbuffer objects. Using this knowledge, we know that if we haven't
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 75f5b0e871ef..4542135b20d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -16,12 +16,13 @@
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
-#include "i915_gem_object.h"
#include "i915_gem_mman.h"
+#include "i915_gem_object.h"
+#include "i915_gem_ttm.h"
+#include "i915_jiffies.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 148034ef504d..8878539c10ed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -802,6 +802,7 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr);
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index b9dae15c1d16..26dda55a07ff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -441,11 +441,20 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
written = file->f_op->write_iter(&kiocb, &iter);
BUG_ON(written == -EIOCBQUEUED);
- if (written != size)
- return -EIO;
-
+ /*
+ * First, check if write_iter returned a negative error.
+ * If the write failed, return the real error code immediately.
+ * This prevents it from being overwritten by the short write check below.
+ */
if (written < 0)
return written;
+ /*
+ * Check for a short write (written bytes != requested size).
+ * Even if some data was written, return -EIO to indicate that the
+ * write was not fully completed.
+ */
+ if (written != size)
+ return -EIO;
return 0;
}
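The reordered checks in shmem_pwrite() make sure a genuine negative error from ->write_iter() is propagated as-is, and only an otherwise-successful short write becomes -EIO. The same pattern in isolation, with generic names:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/* Map a write_iter-style return value to 0 or a negative errno. */
static int check_write_result(ssize_t written, size_t requested)
{
	/* A negative value is already an errno; pass it through untouched. */
	if (written < 0)
		return (int)written;

	/* A short write is reported as -EIO, even if some bytes went out. */
	if ((size_t)written != requested)
		return -EIO;

	return 0;
}

int main(void)
{
	printf("%d\n", check_write_result(-EFAULT, 4096)); /* -14, not -EIO */
	printf("%d\n", check_write_result(1024, 4096));    /* -5, short write */
	printf("%d\n", check_write_result(4096, 4096));    /* 0 */
	return 0;
}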
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0244087ca10d..f859c99f969b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -25,6 +25,11 @@
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
+struct intel_stolen_node {
+ struct drm_i915_private *i915;
+ struct drm_mm_node node;
+};
+
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -37,9 +42,9 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start, u64 end)
+static int __i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
{
int ret;
@@ -59,24 +64,43 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment)
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
+{
+ return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
+ size, alignment,
+ start, end);
+}
+
+static int __i915_gem_stolen_insert_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment)
{
- return i915_gem_stolen_insert_node_in_range(i915, node,
- size, alignment,
- I915_GEM_STOLEN_BIAS,
- U64_MAX);
+ return __i915_gem_stolen_insert_node_in_range(i915, node,
+ size, alignment,
+ I915_GEM_STOLEN_BIAS,
+ U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node)
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment)
+{
+ return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
+}
+
+static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node)
{
mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&i915->mm.stolen_lock);
}
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ __i915_gem_stolen_remove_node(node->i915, &node->node);
+}
+
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
@@ -684,7 +708,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
i915_gem_object_release_memory_region(obj);
@@ -773,8 +797,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
} else {
- ret = i915_gem_stolen_insert_node(i915, stolen, size,
- mem->min_page_size);
+ ret = __i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
}
if (ret)
goto err_free;
@@ -786,7 +810,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
return 0;
err_remove:
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return ret;
@@ -1001,38 +1025,64 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
return obj->ops == &i915_gem_object_stolen_ops;
}
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+bool i915_gem_stolen_initialized(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return drm_mm_initialized(&i915->mm.stolen);
}
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return i915->dsm.stolen.start;
}
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return resource_size(&i915->dsm.stolen);
}
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
{
+ struct drm_i915_private *i915 = node->i915;
+
return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return drm_mm_node_allocated(&node->node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node)
{
- return drm_mm_node_allocated(node);
+ return node->node.start;
}
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
{
- return node->start;
+ return node->node.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->i915 = i915;
+
+ return node;
}
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
{
- return node->size;
+ kfree(node);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index dfe0db8bb1b9..7b0386002ed4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -8,21 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
-struct drm_mm_node;
+struct drm_device;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_stolen_node;
-#define i915_stolen_fb drm_mm_node
-
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node);
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
@@ -38,15 +34,17 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
#define I915_GEM_STOLEN_BIAS SZ_128K
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node);
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node);
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif /* __I915_GEM_STOLEN_H__ */
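With struct intel_stolen_node defined privately in i915_gem_stolen.c, callers now go through alloc/insert/remove/free helpers instead of embedding a drm_mm_node. A standalone sketch of the same opaque-handle shape; all names and the fixed offset are illustrative, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

/* Defined privately, mirroring how intel_stolen_node now hides drm_mm_node. */
struct stolen_node {
	unsigned long start;
	unsigned long size;
};

static struct stolen_node *stolen_node_alloc(void)
{
	return calloc(1, sizeof(struct stolen_node));
}

static int stolen_node_insert(struct stolen_node *node, unsigned long size)
{
	/* The real helper takes mm.stolen_lock and calls drm_mm_insert_node_*(). */
	node->start = 0x100000; /* made-up offset */
	node->size = size;
	return 0;
}

static void stolen_node_free(struct stolen_node *node)
{
	free(node);
}

int main(void)
{
	struct stolen_node *node = stolen_node_alloc();

	if (!node)
		return 1;

	stolen_node_insert(node, 0x4000);
	printf("stolen node at 0x%lx, size 0x%lx\n", node->start, node->size);
	stolen_node_free(node);
	return 0;
}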
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5a296ba3758a..567b97d28d30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -145,8 +145,9 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (GRAPHICS_VER(i915) == 2 ||
- (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+ if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))
+ tile_width = 128;
+ else if (GRAPHICS_VER(i915) == 2)
tile_width = 128;
else
tile_width = 512;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f7455c9c34d8..f65fe86c02b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -11,6 +11,7 @@
#include <drm/ttm/ttm_tt.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 54829801d3f7..2893df65c359 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -138,6 +138,13 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+
+ i915_gem_fence_wait_priority(fence, &attr);
+}
+
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eb0158e43417..1330c0b431a7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -962,13 +962,14 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (IS_ERR(rpcs))
return PTR_ERR(rpcs);
+ i915_gem_ww_ctx_init(&ww, false);
+
batch = i915_vma_instance(rpcs, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_put;
}
- i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(obj, &ww);
if (!err)
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 8116fd5987e2..8c01fb6d4e7b 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -292,15 +292,15 @@ int gen4_emit_bb_start(struct i915_request *rq,
void gen2_irq_enable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
void gen2_irq_disable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
}
void gen5_irq_enable(struct intel_engine_cs *engine)
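gen2_irq_enable()/gen2_irq_disable() keep working from a software copy of the interrupt mask, now named gen2_imr_mask, so toggling an engine's bit never needs an MMIO read. The cached-mask pattern on its own, with the register write stubbed out:

#include <stdio.h>

/* Software copy of the interrupt mask register (bit set = masked). */
static unsigned int imr_cache = ~0u;

/* Stand-in for the MMIO write; the real code writes GEN2_IMR. */
static void write_imr(unsigned int val)
{
	printf("IMR <- 0x%08x\n", val);
}

static void irq_enable(unsigned int bits)
{
	imr_cache &= ~bits; /* unmask without reading the register back */
	write_imr(imr_cache);
}

static void irq_disable(unsigned int bits)
{
	imr_cache |= bits;  /* re-mask, again using only the cached value */
	write_imr(imr_cache);
}

int main(void)
{
	irq_enable(1u << 1);  /* e.g. an engine's user interrupt bit */
	irq_disable(1u << 1);
	return 0;
}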
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index c042b2031577..b279878dca29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -6,6 +6,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_request.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 88b147fa5cb1..c90b35881a26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -205,7 +205,7 @@ static u64 div_u64_roundup(u64 nom, u32 den)
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
+ return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency);
}
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
@@ -215,7 +215,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
+ return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC);
}
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
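The switch to mul_u64_u32_div() matters because count * NSEC_PER_SEC overflows a u64 once count exceeds roughly 1.8e10 ticks, while mul_u64_u32_div() keeps a wider intermediate for the multiply. A small standalone illustration of the overflow, using unsigned __int128 as the wide intermediate; the tick count and 19.2 MHz frequency are made-up example values.

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t count = 20000000000ULL; /* ~2e10 ticks, > 2^64 / NSEC_PER_SEC */
	uint32_t freq = 19200000;        /* 19.2 MHz reference clock */

	/* Naive 64-bit multiply wraps around before the divide. */
	uint64_t wrapped = count * NSEC_PER_SEC / freq;

	/* Widening the intermediate, which is effectively what
	 * mul_u64_u32_div() provides, keeps the full product. */
	uint64_t wide = (uint64_t)((unsigned __int128)count * NSEC_PER_SEC / freq);

	printf("naive: %" PRIu64 " ns\n", wrapped);
	printf("wide:  %" PRIu64 " ns\n", wide);
	return 0;
}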
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 13721c9081b6..286d49ecc449 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
+#include "display/vlv_clock.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -804,7 +805,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
mul = 1000000;
- div = i915->czclk_freq;
+ div = vlv_clock_get_czclk(&i915->drm);
overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(uncore, reg);
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 4da94098bd3e..b01c837ab646 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,8 +7,8 @@
#include <drm/intel/i915_drm.h>
-#include "display/intel_display.h"
#include "display/intel_display_rps.h"
+#include "display/vlv_clock.h"
#include "soc/intel_dram.h"
#include "i915_drv.h"
@@ -1690,10 +1690,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->gpll_ref_freq =
- vlv_get_cck_clock(&i915->drm, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- i915->czclk_freq);
+ rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm);
drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
rps->gpll_ref_freq);
@@ -1703,13 +1700,13 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1737,13 +1734,13 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1780,6 +1777,7 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
@@ -1796,7 +1794,7 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
time = ktime_us_delta(now.ktime, prev->ktime);
- time *= rps_to_i915(rps)->czclk_freq;
+ time *= vlv_clock_get_czclk(&i915->drm);
/* Workload can be split between render + media,
* e.g. SwapBuffers being blitted in X after being rendered in
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 7d486dfa2fc1..ece88c612e27 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -2923,7 +2924,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(&engine->wa_list);
}
-static const struct i915_range mcr_ranges_gen8[] = {
+static const struct i915_mmio_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
@@ -2932,7 +2933,7 @@ static const struct i915_range mcr_ranges_gen8[] = {
{},
};
-static const struct i915_range mcr_ranges_gen12[] = {
+static const struct i915_mmio_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
@@ -2941,7 +2942,7 @@ static const struct i915_range mcr_ranges_gen12[] = {
{},
};
-static const struct i915_range mcr_ranges_xehp[] = {
+static const struct i915_mmio_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
@@ -2960,7 +2961,7 @@ static const struct i915_range mcr_ranges_xehp[] = {
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
- const struct i915_range *mcr_ranges;
+ const struct i915_mmio_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 0855140c2892..a06b397b6d42 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -13,6 +13,7 @@
#include "gt/intel_reset.h"
#include "gt/selftest_engine_heartbeat.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 5c676f98baf0..bbeba0d3fca8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1280,20 +1280,15 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
}
if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
- switch (cap_type_id) {
- case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = -EINVAL;
+ if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP)
ret = vfio_info_add_capability(&caps,
&sparse->header,
struct_size(sparse, areas,
sparse->nr_areas));
- if (ret) {
- kfree(sparse);
- return ret;
- }
- break;
- default:
+ if (ret) {
kfree(sparse);
- return -EINVAL;
+ return ret;
}
}
@@ -1362,21 +1357,27 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
+ if (!is_power_of_2(hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) ||
+ !is_power_of_2(hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+ if (!hdr.count)
+ return -EINVAL;
+
ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size);
if (ret) {
- gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
- return -EINVAL;
- }
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n");
+ return ret;
}
+
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
}
ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
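The added hdr.flags validation requires exactly one VFIO_IRQ_SET data-type bit and exactly one action bit; is_power_of_2() on the masked value is a compact "exactly one bit set, and not zero" test. A standalone version of the check, where the mask values are placeholders standing in for the uapi definitions:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder masks; the real ones come from the VFIO uapi header. */
#define DATA_TYPE_MASK   0x07 /* NONE | BOOL | EVENTFD */
#define ACTION_TYPE_MASK 0x38 /* MASK | UNMASK | TRIGGER */

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Accept flags only if exactly one data type and one action are selected. */
static bool irq_set_flags_valid(unsigned int flags)
{
	return is_power_of_2(flags & DATA_TYPE_MASK) &&
	       is_power_of_2(flags & ACTION_TYPE_MASK);
}

int main(void)
{
	printf("%d\n", irq_set_flags_valid(0x01 | 0x20)); /* 1: one of each */
	printf("%d\n", irq_set_flags_valid(0x03 | 0x20)); /* 0: two data types */
	printf("%d\n", irq_set_flags_valid(0x01));        /* 0: no action bit */
	return 0;
}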
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index c0cc3f8bab35..214eb7effa31 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -52,7 +52,7 @@
* @gpa: guest physical address
*
* Returns:
- * Zero on success, negative error code if failed
+ * The MMIO offset of the given GPA
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
@@ -61,7 +61,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
}
#define reg_is_mmio(gvt, reg) \
- (reg >= 0 && reg < gvt->device_info.mmio_size)
+ (reg < gvt->device_info.mmio_size)
#define reg_is_gtt(gvt, reg) \
(reg >= gvt->device_info.gtt_start_offset \
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index 24e5bb8a670e..3cb615ffa96d 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include "i915_config.h"
-#include "i915_utils.h"
+#include "i915_jiffies.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 89be8da79d3b..c97b76771917 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -46,6 +46,8 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include "display/i9xx_display_sr.h"
#include "display/intel_bw.h"
@@ -737,6 +739,18 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
"DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &i915_display_rpm_interface,
+};
+
+const struct intel_display_parent_interface *i915_driver_parent_interface(void)
+{
+ return &parent;
+}
+
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct drm_i915_private, drm, display);
+
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -758,7 +772,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return ERR_CAST(display);
@@ -1053,7 +1067,6 @@ static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1067,8 +1080,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_disable_user_access(display);
}
- pci_save_state(pdev);
-
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
@@ -1103,7 +1114,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
int ret, i;
@@ -1124,11 +1134,21 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_display_power_resume_early(display);
-
- goto out;
}
- pci_disable_device(pdev);
+ enable_rpm_wakeref_asserts(rpm);
+
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
+
+ return ret;
+}
+
+static int i915_drm_suspend_noirq(struct drm_device *dev, bool hibernation)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
@@ -1140,21 +1160,20 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
* Acer Aspire 1830T
+ *
+ * pci_save_state() prevents drivers/pci from
+ * automagically putting the device into D3.
*/
- if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
- pci_set_power_state(pdev, PCI_D3hot);
-
-out:
- enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake_count)
- intel_runtime_pm_driver_release(rpm);
+ if (hibernation && GRAPHICS_VER(dev_priv) < 6)
+ pci_save_state(pdev);
- return ret;
+ return 0;
}
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
pm_message_t state)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int error;
if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
@@ -1168,7 +1187,14 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
if (error)
return error;
- return i915_drm_suspend_late(&i915->drm, false);
+ error = i915_drm_suspend_late(&i915->drm, false);
+ if (error)
+ return error;
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
static int i915_drm_resume(struct drm_device *dev)
@@ -1260,7 +1286,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1274,41 +1299,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
* similar so that power domains can be employed.
*/
- /*
- * Note that we need to set the power state explicitly, since we
- * powered off the device during freeze and the PCI core won't power
- * it back up for us during thaw. Powering off the device during
- * freeze is not a hard requirement though, and during the
- * suspend/resume phases the PCI core makes sure we get here with the
- * device powered on. So in case we change our freeze logic and keep
- * the device powered we can also remove the following set power state
- * call.
- */
- ret = pci_set_power_state(pdev, PCI_D0);
- if (ret) {
- drm_err(&dev_priv->drm,
- "failed to set PCI D0 power state (%d)\n", ret);
- return ret;
- }
-
- /*
- * Note that pci_enable_device() first enables any parent bridge
- * device and only then sets the power state for this device. The
- * bridge enabling is a nop though, since bridge devices are resumed
- * first. The order of enabling power and enabling the device is
- * imposed by the PCI core as described above, so here we preserve the
- * same order for the freeze/thaw phases.
- *
- * TODO: eventually we should remove pci_disable_device() /
- * pci_enable_enable_device() from suspend/resume. Due to how they
- * depend on the device enable refcount we can't anyway depend on them
- * disabling/enabling the device.
- */
- if (pci_enable_device(pdev))
- return -EIO;
-
- pci_set_master(pdev);
-
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1328,11 +1318,18 @@ static int i915_drm_resume_early(struct drm_device *dev)
int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int ret;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret)
+ return ret;
+
+ pci_restore_state(pdev);
+
ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
@@ -1389,6 +1386,16 @@ static int i915_pm_suspend_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, false);
}
+static int i915_pm_suspend_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, false);
+}
+
static int i915_pm_poweroff_late(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1399,6 +1406,16 @@ static int i915_pm_poweroff_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, true);
}
+static int i915_pm_poweroff_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, true);
+}
+
static int i915_pm_resume_early(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1664,24 +1681,25 @@ const struct dev_pm_ops i915_pm_ops = {
.prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
+ .suspend_noirq = i915_pm_suspend_noirq,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.complete = i915_pm_complete,
/*
* S4 event handlers
- * @freeze, @freeze_late : called (1) before creating the
- * hibernation image [PMSG_FREEZE] and
- * (2) after rebooting, before restoring
- * the image [PMSG_QUIESCE]
- * @thaw, @thaw_early : called (1) after creating the hibernation
- * image, before writing it [PMSG_THAW]
- * and (2) after failing to create or
- * restore the image [PMSG_RECOVER]
- * @poweroff, @poweroff_late: called after writing the hibernation
- * image, before rebooting [PMSG_HIBERNATE]
- * @restore, @restore_early : called after rebooting and restoring the
- * hibernation image [PMSG_RESTORE]
+ * @freeze* : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw* : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff* : called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore* : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_freeze,
.freeze_late = i915_pm_freeze_late,
@@ -1689,6 +1707,7 @@ const struct dev_pm_ops i915_pm_ops = {
.thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
+ .poweroff_noirq = i915_pm_poweroff_noirq,
.restore_early = i915_pm_restore_early,
.restore = i915_pm_restore,
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 1e95ecb2a163..9551519ab429 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -12,6 +12,7 @@ struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_parent_interface;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
@@ -24,6 +25,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+const struct intel_display_parent_interface *i915_driver_parent_interface(void);
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6a768aad8edd..95f9ddf22ce4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -174,6 +174,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* display device data, must be placed after drm device member */
struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
@@ -234,14 +235,11 @@ struct drm_i915_private {
/* Sideband mailbox protection */
struct mutex sb_lock;
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask;
+ /* Cached value of gen 2-4 IMR to avoid reads in updating the bitfield */
+ u32 gen2_imr_mask;
bool preserve_bios_swizzle;
- unsigned int hpll_freq;
- unsigned int czclk_freq;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -490,16 +488,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
-/*
- * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE,
- * so we need to define these even on platforms that the i915 base driver
- * doesn't support. Ensure the parameter is used in the definition to
- * avoid 'unused variable' warnings when compiling the shared display code
- * for i915.
- */
-#define IS_LUNARLAKE(i915) (0 && i915)
-#define IS_BATTLEMAGE(i915) (0 && i915)
-#define IS_PANTHERLAKE(i915) (0 && i915)
#define IS_ARROWLAKE_H(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
@@ -604,8 +592,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
- !(IS_I915G(i915) || IS_I915GM(i915)))
+#define HAS_128_BYTE_Y_TILING(i915) (!IS_I915G(i915) && !IS_I915GM(i915))
#define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6)
#define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f57a981ecc9a..1898be4ddc8b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,12 +34,10 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
-#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
-#include "display/intel_psr_regs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
@@ -416,7 +414,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
struct drm_i915_private *i915 = arg;
struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
- u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ u32 gt_iir, de_ier = 0, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
if (unlikely(!intel_irqs_enabled(i915)))
@@ -425,19 +423,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /* disable master interrupt before clearing iir */
- de_ier = raw_reg_read(regs, DEIER);
- raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- if (!HAS_PCH_NOP(display)) {
- sde_ier = raw_reg_read(regs, SDEIER);
- raw_reg_write(regs, SDEIER, 0);
- }
+ /* Disable master and south interrupts */
+ ilk_display_irq_master_disable(display, &de_ier, &sde_ier);
/* Find, clear, then process each source of interrupt */
@@ -451,15 +438,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
- de_iir = raw_reg_read(regs, DEIIR);
- if (de_iir) {
- raw_reg_write(regs, DEIIR, de_iir);
- if (DISPLAY_VER(display) >= 7)
- ivb_display_irq_handler(display, de_iir);
- else
- ilk_display_irq_handler(display, de_iir);
+ if (ilk_display_irq_handler(display))
ret = IRQ_HANDLED;
- }
if (GRAPHICS_VER(i915) >= 6) {
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
@@ -470,9 +450,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
}
}
- raw_reg_write(regs, DEIER, de_ier);
- if (sde_ier)
- raw_reg_write(regs, SDEIER, sde_ier);
+ /* Re-enable master and south interrupts */
+ ilk_display_irq_master_enable(display, de_ier, sde_ier);
pmu_irq_stats(i915, ret);
@@ -657,22 +636,10 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_display *display = dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- gen2_irq_reset(uncore, DE_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
-
- if (GRAPHICS_VER(dev_priv) == 7)
- intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
-
- if (IS_HASWELL(dev_priv)) {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
- }
+ /* The master interrupt enable is in DEIER, reset display irq first */
+ ilk_display_irq_reset(display);
gen5_gt_irq_reset(to_gt(dev_priv));
-
- ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
@@ -827,9 +794,10 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
+#define I9XX_HAS_FBC(i915) (IS_I85X(i915) || IS_I865G(i915) || IS_I915GM(i915) || IS_I945GM(i915))
+
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
- struct intel_display *display = i915->display;
/*
* On gen2/3 FBC generates (seemingly spurious)
* display INVALID_GTT/INVALID_GTT_PTE table errors.
@@ -842,7 +810,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* Unfortunately we can't mask off individual PGTBL_ER bits,
* so we just have to mask off all page table errors via EMR.
*/
- if (HAS_FBC(display))
+ if (I9XX_HAS_FBC(i915))
return I915_ERROR_MEMORY_REFRESH;
else
return I915_ERROR_PAGE_TABLE |
@@ -898,7 +866,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -909,28 +877,14 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
- enable_mask =
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
-
- if (DISPLAY_VER(display) >= 3) {
- dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
- enable_mask |= I915_ASLE_INTERRUPT;
- }
+ dev_priv->gen2_imr_mask = ~enable_mask;
- if (HAS_HOTPLUG(display)) {
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- }
+ enable_mask |= I915_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i915_display_irq_postinstall(display);
}
@@ -959,8 +913,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (HAS_HOTPLUG(display) &&
- iir & I915_DISPLAY_PORT_INTERRUPT)
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
@@ -1000,7 +953,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static u32 i965_error_mask(struct drm_i915_private *i915)
@@ -1030,25 +983,17 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
-
- enable_mask =
- I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
+
+ dev_priv->gen2_imr_mask = ~enable_mask;
+
+ enable_mask |= I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i965_display_irq_postinstall(display);
}
diff --git a/drivers/gpu/drm/i915/i915_jiffies.h b/drivers/gpu/drm/i915/i915_jiffies.h
new file mode 100644
index 000000000000..18a4eaea897a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_jiffies.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_JIFFIES_H__
+#define __I915_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+#endif /* __I915_JIFFIES_H__ */
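msecs_to_jiffies_timeout(), now in its own header, adds one jiffy so the resulting timeout is never shorter than requested and clamps at MAX_JIFFY_OFFSET. A userspace approximation, with HZ and the helpers as stand-ins for the kernel's:

#include <stdio.h>

#define HZ 250 /* assumed tick rate for the example */
#define MAX_JIFFY_OFFSET ((~0UL >> 1) - 1)

static unsigned long msecs_to_jiffies(unsigned int m)
{
	/* Round up to whole jiffies, as the generic kernel helper does. */
	return (m + (1000 / HZ) - 1) / (1000 / HZ);
}

static unsigned long msecs_to_jiffies_timeout(unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	/* One extra jiffy guarantees at least the requested wall time,
	 * since the first tick may fire almost immediately. */
	return j + 1 < MAX_JIFFY_OFFSET ? j + 1 : MAX_JIFFY_OFFSET;
}

int main(void)
{
	printf("10ms -> %lu jiffies\n", msecs_to_jiffies_timeout(10)); /* 4 */
	printf("0ms  -> %lu jiffies\n", msecs_to_jiffies_timeout(0));  /* 1 */
	return 0;
}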
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.c b/drivers/gpu/drm/i915/i915_mmio_range.c
new file mode 100644
index 000000000000..724041e81aa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "i915_mmio_range.h"
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table)
+{
+ while (table->start || table->end) {
+ if (addr >= table->start && addr <= table->end)
+ return true;
+
+ table++;
+ }
+
+ return false;
+}
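i915_mmio_range_table_contains() walks a table terminated by an all-zero sentinel entry, each entry being an inclusive [start, end] range. A standalone version with a hypothetical table loosely modelled on the MCR ranges above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_range {
	uint32_t start;
	uint32_t end; /* inclusive */
};

/* Walk a zero-terminated table of inclusive ranges. */
static bool range_table_contains(uint32_t addr, const struct mmio_range *table)
{
	for (; table->start || table->end; table++) {
		if (addr >= table->start && addr <= table->end)
			return true;
	}

	return false;
}

int main(void)
{
	static const struct mmio_range ranges[] = {
		{ 0x5500, 0x55ff },
		{ 0x7000, 0x7fff },
		{ 0, 0 }, /* sentinel */
	};

	printf("%d\n", range_table_contains(0x5510, ranges)); /* 1 */
	printf("%d\n", range_table_contains(0x6000, ranges)); /* 0 */
	return 0;
}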
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.h b/drivers/gpu/drm/i915/i915_mmio_range.h
new file mode 100644
index 000000000000..f1c7086d3e3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __I915_MMIO_RANGE_H__
+#define __I915_MMIO_RANGE_H__
+
+#include <linux/types.h>
+
+/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
+struct i915_mmio_range {
+ u32 start;
+ u32 end;
+};
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table);
+
+#endif /* __I915_MMIO_RANGE_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1658f1246c6f..0b9d9f3f7813 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -219,6 +219,7 @@
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -4320,29 +4321,17 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
return false;
}
-static bool reg_in_range_table(u32 addr, const struct i915_range *table)
-{
- while (table->start || table->end) {
- if (addr >= table->start && addr <= table->end)
- return true;
-
- table++;
- }
-
- return false;
-}
-
#define REG_EQUAL(addr, mmio) \
((addr) == i915_mmio_reg_offset(mmio))
-static const struct i915_range gen7_oa_b_counters[] = {
+static const struct i915_mmio_range gen7_oa_b_counters[] = {
{ .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
{ .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
{ .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
{}
};
-static const struct i915_range gen12_oa_b_counters[] = {
+static const struct i915_mmio_range gen12_oa_b_counters[] = {
{ .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
{ .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
{ .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
@@ -4353,7 +4342,7 @@ static const struct i915_range gen12_oa_b_counters[] = {
{}
};
-static const struct i915_range mtl_oam_b_counters[] = {
+static const struct i915_mmio_range mtl_oam_b_counters[] = {
{ .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */
{ .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */
{ .start = 0x393040, .end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */
@@ -4361,43 +4350,43 @@ static const struct i915_range mtl_oam_b_counters[] = {
{}
};
-static const struct i915_range xehp_oa_b_counters[] = {
+static const struct i915_mmio_range xehp_oa_b_counters[] = {
{ .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
{ .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
{}
};
-static const struct i915_range gen7_oa_mux_regs[] = {
+static const struct i915_mmio_range gen7_oa_mux_regs[] = {
{ .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
{ .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
{ .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
{}
};
-static const struct i915_range hsw_oa_mux_regs[] = {
+static const struct i915_mmio_range hsw_oa_mux_regs[] = {
{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
{ .start = 0x25100, .end = 0x2ff90 },
{}
};
-static const struct i915_range chv_oa_mux_regs[] = {
+static const struct i915_mmio_range chv_oa_mux_regs[] = {
{ .start = 0x182300, .end = 0x1823a4 },
{}
};
-static const struct i915_range gen8_oa_mux_regs[] = {
+static const struct i915_mmio_range gen8_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
{ .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
{}
};
-static const struct i915_range gen11_oa_mux_regs[] = {
+static const struct i915_mmio_range gen11_oa_mux_regs[] = {
{ .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
{}
};
-static const struct i915_range gen12_oa_mux_regs[] = {
+static const struct i915_mmio_range gen12_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4410,7 +4399,7 @@ static const struct i915_range gen12_oa_mux_regs[] = {
* Ref: 14010536224:
* 0x20cc is repurposed on MTL, so use a separate array for MTL.
*/
-static const struct i915_range mtl_oa_mux_regs[] = {
+static const struct i915_mmio_range mtl_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4421,61 +4410,61 @@ static const struct i915_range mtl_oa_mux_regs[] = {
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen7_oa_b_counters);
}
static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs);
}
static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs) ||
- reg_in_range_table(addr, gen11_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen11_oa_mux_regs);
}
static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, hsw_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, hsw_oa_mux_regs);
}
static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, chv_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, chv_oa_mux_regs);
}
static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen12_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen12_oa_b_counters);
}
static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
{
if (HAS_OAM(perf->i915) &&
GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oam_b_counters);
+ return i915_mmio_range_table_contains(addr, mtl_oam_b_counters);
return false;
}
static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, xehp_oa_b_counters) ||
- reg_in_range_table(addr, gen12_oa_b_counters) ||
+ return i915_mmio_range_table_contains(addr, xehp_oa_b_counters) ||
+ i915_mmio_range_table_contains(addr, gen12_oa_b_counters) ||
mtl_is_valid_oam_b_counter_addr(perf, addr);
}
static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, mtl_oa_mux_regs);
else
- return reg_in_range_table(addr, gen12_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen12_oa_mux_regs);
}
static u32 mask_reg_value(u32 reg, u32 val)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d31cced04f15..a6697db21c72 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -897,7 +897,7 @@ static ssize_t i915_pmu_format_show(struct device *dev,
struct i915_str_attribute *eattr;
eattr = container_of(attr, struct i915_str_attribute, attr);
- return sprintf(buf, "%s\n", eattr->str);
+ return sysfs_emit(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
@@ -927,7 +927,7 @@ static ssize_t i915_pmu_event_show(struct device *dev,
struct i915_ext_attribute *eattr;
eattr = container_of(attr, struct i915_ext_attribute, attr);
- return sprintf(buf, "config=0x%lx\n", eattr->val);
+ return sysfs_emit(buf, "config=0x%lx\n", eattr->val);
}
#define __event(__counter, __name, __unit) \
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index bfe98cb9a038..e81fac8ab51b 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -174,6 +174,16 @@
*/
#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
typedef struct {
u32 reg;
} i915_reg_t;
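Note: REG_FIELD_MAX() reports the largest value a field can encode, independent of the field's bit position. A quick illustration (the field chosen is arbitrary):

	/* An 8-bit field occupying bits 21:14 can hold values 0..255. */
	u32 max = REG_FIELD_MAX(REG_GENMASK(21, 14));	/* 0xff */
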
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index a0c892e4c40d..4f75115b87d6 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -38,8 +38,10 @@
struct drm_i915_private;
+#ifndef MISSING_CASE
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+#endif
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
@@ -65,11 +67,13 @@ bool i915_error_injected(void);
drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
+#ifndef fetch_and_zero
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
__T; \
})
+#endif
/*
* check_user_mbz: Check that a user value exists and is zero
@@ -100,43 +104,6 @@ static inline bool is_power_of_2_u64(u64 n)
return (n != 0 && ((n & (n - 1)) == 0));
}
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
- unsigned long j = msecs_to_jiffies(m);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
- unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
- /*
- * Don't re-read the value of "jiffies" every time since it may change
- * behind our back and break the math.
- */
- tmp_jiffies = jiffies;
- target_jiffies = timestamp_jiffies +
- msecs_to_jiffies_timeout(to_wait_ms);
-
- if (time_after(target_jiffies, tmp_jiffies)) {
- remaining_jiffies = target_jiffies - tmp_jiffies;
- while (remaining_jiffies)
- remaining_jiffies =
- schedule_timeout_uninterruptible(remaining_jiffies);
- }
-}
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7ce3e6de0c19..d11c2814b787 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -177,6 +178,82 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
return track_intel_runtime_pm_wakeref(rpm);
}
+static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return &i915->runtime_pm;
+}
+
+static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_raw(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_if_in_use(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_noresume(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_unchecked(const struct drm_device *drm)
+{
+ intel_runtime_pm_put_unchecked(drm_to_rpm(drm));
+}
+
+static bool i915_display_rpm_suspended(const struct drm_device *drm)
+{
+ return intel_runtime_pm_suspended(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_held(const struct drm_device *drm)
+{
+ assert_rpm_wakelock_held(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_block(const struct drm_device *drm)
+{
+ disable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_unblock(const struct drm_device *drm)
+{
+ enable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+const struct intel_display_rpm_interface i915_display_rpm_interface = {
+ .get = i915_display_rpm_get,
+ .get_raw = i915_display_rpm_get_raw,
+ .get_if_in_use = i915_display_rpm_get_if_in_use,
+ .get_noresume = i915_display_rpm_get_noresume,
+ .put = i915_display_rpm_put,
+ .put_raw = i915_display_rpm_put_raw,
+ .put_unchecked = i915_display_rpm_put_unchecked,
+ .suspended = i915_display_rpm_suspended,
+ .assert_held = i915_display_rpm_assert_held,
+ .assert_block = i915_display_rpm_assert_block,
+ .assert_unblock = i915_display_rpm_assert_unblock
+};
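Note: with the ops table above, display code that only holds a struct drm_device pointer can take and drop runtime PM references without reaching into i915 internals. A rough caller sketch (how the display core actually stores the interface pointer is outside this hunk and assumed here):

	const struct intel_display_rpm_interface *rpm = &i915_display_rpm_interface;
	struct ref_tracker *wakeref;

	wakeref = rpm->get(drm);	/* grab a runtime PM wakeref */
	/* ... access hardware ... */
	rpm->put(drm, wakeref);		/* release it again */
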
+
/**
* intel_runtime_pm_get_raw - grab a raw runtime pm reference
* @rpm: the intel_runtime_pm structure
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 7428bd8fa67f..ed6c43b17f9a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -14,6 +14,7 @@
struct device;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_rpm_interface;
/*
* This struct helps tracking the state needed for runtime PM, which puts the
@@ -226,4 +227,6 @@ static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
}
#endif
+extern const struct intel_display_rpm_interface i915_display_rpm_interface;
+
#endif /* __INTEL_RUNTIME_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 93acdf9edd34..4adeb271fcbf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -36,6 +36,7 @@
#include "i915_reg.h"
#include "i915_vgpu.h"
#include "i915_wait_util.h"
+#include "i915_mmio_range.h"
#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -1000,7 +1001,7 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
* scanned for obvious mistakes or typos by the selftests.
*/
-static const struct i915_range gen8_shadowed_regs[] = {
+static const struct i915_mmio_range gen8_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0xA008, .end = 0xA00C },
{ .start = 0x12030, .end = 0x12030 },
@@ -1008,7 +1009,7 @@ static const struct i915_range gen8_shadowed_regs[] = {
{ .start = 0x22030, .end = 0x22030 },
};
-static const struct i915_range gen11_shadowed_regs[] = {
+static const struct i915_mmio_range gen11_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2550, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1035,7 +1036,7 @@ static const struct i915_range gen11_shadowed_regs[] = {
{ .start = 0x1D8510, .end = 0x1D8550 },
};
-static const struct i915_range gen12_shadowed_regs[] = {
+static const struct i915_mmio_range gen12_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1079,7 +1080,7 @@ static const struct i915_range gen12_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range dg2_shadowed_regs[] = {
+static const struct i915_mmio_range dg2_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1118,7 +1119,7 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range mtl_shadowed_regs[] = {
+static const struct i915_mmio_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1136,7 +1137,7 @@ static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x22510, .end = 0x22550 },
};
-static const struct i915_range xelpmp_shadowed_regs[] = {
+static const struct i915_mmio_range xelpmp_shadowed_regs[] = {
{ .start = 0x1C0030, .end = 0x1C0030 },
{ .start = 0x1C0510, .end = 0x1C0550 },
{ .start = 0x1C8030, .end = 0x1C8030 },
@@ -1157,7 +1158,7 @@ static const struct i915_range xelpmp_shadowed_regs[] = {
{ .start = 0x38CFD4, .end = 0x38CFDC },
};
-static int mmio_range_cmp(u32 key, const struct i915_range *range)
+static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
{
if (key < range->start)
return -1;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 6048b99b96cb..fafc2ca9a237 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -123,12 +123,6 @@ struct intel_forcewake_range {
enum forcewake_domains domains;
};
-/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
-struct i915_range {
- u32 start;
- u32 end;
-};
-
struct intel_uncore {
void __iomem *regs;
@@ -162,7 +156,7 @@ struct intel_uncore {
* Shadowed registers are special cases where we can safely write
* to the register *without* grabbing forcewake.
*/
- const struct i915_range *shadowed_reg_table;
+ const struct i915_mmio_range *shadowed_reg_table;
unsigned int shadowed_reg_table_entries;
struct notifier_block pmic_bus_access_nb;
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 9c276c9d0a75..8460f0a70d04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -30,6 +30,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
#include "i915_wait_util.h"
#include "igt_flush_test.h"
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 58bcbdcef563..507bf42a1aaf 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -64,7 +64,7 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
struct {
- const struct i915_range *regs;
+ const struct i915_mmio_range *regs;
unsigned int size;
} range_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
@@ -74,7 +74,7 @@ static int intel_shadow_table_check(void)
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
- const struct i915_range *range;
+ const struct i915_mmio_range *range;
unsigned int i, j;
s32 prev;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fb8751bd5df0..b59626c4994c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -33,6 +33,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "i915_driver.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -183,7 +184,8 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- display = intel_display_device_probe(pdev);
+ /* FIXME: Can we run selftests using a mock device without display? */
+ display = intel_display_device_probe(pdev, i915_driver_parent_interface());
if (IS_ERR(display))
goto err_device;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 3a31429453e8..2e16346a6cc0 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -336,7 +336,7 @@ static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
/* Convert total Gb to Gb per DRAM device */
- return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+ return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) >= 16;
}
static void
@@ -355,7 +355,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
}
drm_dbg_kms(&i915->drm,
- "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
str_yes_no(skl_is_16gb_dimm(dimm)));
}
@@ -385,7 +385,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915,
ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
- drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
@@ -407,7 +407,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
u32 val;
int ret;
- /* Assume 16Gb DIMMs are present until proven otherwise */
+ /* Assume 16Gb+ DIMMs are present until proven otherwise */
dram_info->has_16gb_dimms = true;
val = intel_uncore_read(&i915->uncore,
@@ -439,7 +439,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
- drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n",
+ drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n",
str_yes_no(dram_info->has_16gb_dimms));
return 0;
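For illustration: dimm->size is the total DIMM capacity in Gb, so a 128 Gb DIMM built from eight DRAM devices works out to 16 Gb per device and the helper returns true; a 256 Gb DIMM with eight 32 Gb devices also returns true with the new >= comparison, which the old == 16 check would have missed.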
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 22ce61bdfc06..08f8ba4fd148 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -408,7 +408,6 @@ static int radeon_atif_handler(struct radeon_device *rdev,
pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(rdev_to_drm(rdev));
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9f6a3df951ba..012d8b2295b8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -875,10 +875,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1066,10 +1064,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1154,10 +1150,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1402,10 +1396,8 @@ out:
}
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1714,10 +1706,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 351b9dfcdad8..35fb99bcd9a7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -644,8 +644,6 @@ radeon_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
rdev = dev->dev_private;
/* if we have active crtcs and we don't have a power ref,
take the current one */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e821d67af7..26ad9adc5d8c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -262,6 +262,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
unsigned long flags = 0;
struct drm_device *ddev;
struct radeon_device *rdev;
+ struct device *dev = &pdev->dev;
const struct drm_format_info *format;
int ret;
@@ -277,7 +278,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- dev_info(&pdev->dev,
+ dev_info(dev,
"SI support disabled by module param\n");
return -ENODEV;
}
@@ -289,7 +290,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
- dev_info(&pdev->dev,
+ dev_info(dev,
"CIK support disabled by module param\n");
return -ENODEV;
}
@@ -303,28 +304,28 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*rdev), ddev);
+ rdev = devm_drm_dev_alloc(dev, &kms_driver, typeof(*rdev), ddev);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- rdev->dev = &pdev->dev;
+ rdev->dev = dev;
rdev->pdev = pdev;
ddev = rdev_to_drm(rdev);
ddev->dev_private = rdev;
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
+ return ret;
pci_set_drvdata(pdev, ddev);
ret = radeon_driver_load_kms(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
ret = drm_dev_register(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +338,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
-err_agp:
+err:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(ddev);
return ret;
}
static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- /* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown
- */
- if (radeon_device_is_virtual())
- radeon_pci_remove(pdev);
-
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
@@ -477,7 +462,6 @@ static int radeon_pmops_runtime_idle(struct device *dev)
}
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
return 1;
@@ -499,7 +483,6 @@ long radeon_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
@@ -613,7 +596,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
.shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 4df6c9167bf0..c2cfe2d7915f 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -154,7 +154,6 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user)
return 0;
err_pm_runtime_mark_last_busy:
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return ret;
}
@@ -164,7 +163,6 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user)
struct drm_fb_helper *fb_helper = info->par;
struct radeon_device *rdev = fb_helper->dev->dev_private;
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 645e33bf7947..7cbe02ffb193 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
rdev->agp = NULL;
done_free:
- kfree(rdev);
dev->dev_private = NULL;
}
@@ -170,7 +169,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
@@ -677,7 +675,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -687,7 +684,6 @@ err_fpriv:
kfree(fpriv);
err_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
}
@@ -737,7 +733,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3fbec058facc..f8a3a1bfe42e 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -205,7 +205,6 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/ext/i915_irq.o \
- display/ext/i915_utils.o \
display/intel_bo.o \
display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
@@ -218,6 +217,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_hdcp_gsc.o \
display/xe_panic.o \
display/xe_plane_initial.o \
+ display/xe_stolen.o \
display/xe_tdf.o
# SOC code shared with i915
@@ -234,6 +234,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
+ i915-display/intel_casf.o \
i915-display/intel_cdclk.o \
i915-display/intel_cmtg.o \
i915-display/intel_color.o \
@@ -243,6 +244,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_crtc_state_dump.o \
i915-display/intel_cursor.o \
i915-display/intel_cx0_phy.o \
+ i915-display/intel_dbuf_bw.o \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
@@ -254,7 +256,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_power.o \
i915-display/intel_display_power_map.o \
i915-display/intel_display_power_well.o \
+ i915-display/intel_display_rpm.o \
i915-display/intel_display_trace.o \
+ i915-display/intel_display_utils.o \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
@@ -291,6 +295,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_hti.o \
i915-display/intel_link_bw.o \
i915-display/intel_lspcon.o \
+ i915-display/intel_lt_phy.o \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
@@ -311,6 +316,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
i915-display/intel_wm.o \
+ i915-display/skl_prefill.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
i915-display/skl_watermark.o
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 8a048980ea38..0548b2e0316f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -5,10 +5,8 @@
#define __I915_GEM_OBJECT_H__
struct dma_fence;
-struct i915_sched_attr;
-static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
{
}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index f097fc6d5127..48e3256ba37e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -6,80 +6,35 @@
#ifndef _I915_GEM_STOLEN_H_
#define _I915_GEM_STOLEN_H_
-#include "xe_ttm_stolen_mgr.h"
-#include "xe_res_cursor.h"
-#include "xe_validation.h"
-
-struct xe_bo;
-
-struct i915_stolen_fb {
- struct xe_bo *bo;
-};
-
-static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align,
- u32 start, u32 end)
-{
- struct xe_bo *bo;
- int err = 0;
- u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
-
- if (start < SZ_4K)
- start = SZ_4K;
-
- if (align) {
- size = ALIGN(size, align);
- start = ALIGN(start, align);
- }
-
- bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
- size, start, end, ttm_bo_type_kernel, flags);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- bo = NULL;
- return err;
- }
-
- fb->bo = bo;
-
- return err;
-}
-
-static inline int i915_gem_stolen_insert_node(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align)
-{
- /* Not used on xe */
- BUG_ON(1);
- return -ENODEV;
-}
-
-static inline void i915_gem_stolen_remove_node(struct xe_device *xe,
- struct i915_stolen_fb *fb)
-{
- xe_bo_unpin_map_no_vm(fb->bo);
- fb->bo = NULL;
-}
-
-#define i915_gem_stolen_initialized(xe) (!!ttm_manager_type(&(xe)->ttm, XE_PL_STOLEN))
-#define i915_gem_stolen_node_allocated(fb) (!!((fb)->bo))
-
-static inline u32 i915_gem_stolen_node_offset(struct i915_stolen_fb *fb)
-{
- struct xe_res_cursor res;
-
- xe_res_first(fb->bo->ttm.resource, 0, 4096, &res);
- return res.start;
-}
-
-/* Used for < gen4. These are not supported by Xe */
-#define i915_gem_stolen_area_address(xe) (!WARN_ON(1))
-/* Used for gen9 specific WA. Gen9 is not supported by Xe */
-#define i915_gem_stolen_area_size(xe) (!WARN_ON(1))
-
-#define i915_gem_stolen_node_address(xe, fb) (xe_ttm_stolen_gpu_offset(xe) + \
- i915_gem_stolen_node_offset(fb))
-#define i915_gem_stolen_node_size(fb) ((u64)((fb)->bo->ttm.base.size))
+#include <linux/types.h>
+
+struct drm_device;
+struct intel_stolen_node;
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end);
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int align);
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
+
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index b8269391bc69..3e79a74ff7de 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
-#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -35,7 +34,4 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
deleted file mode 100644
index c11130440d31..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/* Copyright © 2025 Intel Corporation */
-
-#ifndef __I915_SCHEDULER_TYPES_H__
-#define __I915_SCHEDULER_TYPES_H__
-
-#define I915_PRIORITY_DISPLAY 0
-
-struct i915_sched_attr {
- int priority;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
index 1d7c4360e5c0..bcd441dc0fce 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
@@ -3,4 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/i915_utils.h"
+/* for soc/ */
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+/* for a couple of users under i915/display */
+#define i915_inject_probe_failure(unused) ((unused) && 0)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
deleted file mode 100644
index 1421c2a7b64d..000000000000
--- a/drivers/gpu/drm/xe/display/ext/i915_utils.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-
-bool i915_vtd_active(struct drm_i915_private *i915)
-{
- if (device_iommu_mapped(i915->drm.dev))
- return true;
-
- /* Running as a guest, we assume the host is enforcing VT'd */
- return i915_run_as_guest();
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-/* i915 specific, just put here for shutting it up */
-int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index ebdb22c9499d..db8b1a27b4de 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -24,8 +24,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
xe_bo_put(bo);
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index af8139d00161..7ad76022cb14 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -3,11 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_ttm_stolen_mgr.h"
@@ -15,30 +12,22 @@
#include <generated/xe_device_wa_oob.h>
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+/*
+ * FIXME: There shouldn't be any reason to have XE_PAGE_SIZE stride
+ * alignment. The same 64 as i915 uses should be fine, and we shouldn't need to
+ * have driver specific values. However, dropping the stride alignment to 64
+ * leads to underflowing the bo pin count in the atomic cleanup work.
+ */
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct xe_device *xe = to_xe_device(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct xe_bo *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
+ return ALIGN(stride, XE_PAGE_SIZE);
+}
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_bo *obj;
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
@@ -62,33 +51,22 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj)) {
drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj);
- fb = ERR_PTR(-ENOMEM);
- goto err;
- }
-
- fb = intel_framebuffer_create(&obj->ttm.base,
- drm_get_format_info(dev,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- if (IS_ERR(fb)) {
- xe_bo_unpin_map_no_vm(obj);
- goto err;
+ return ERR_PTR(-ENOMEM);
}
- drm_gem_object_put(&obj->ttm.base);
-
- return to_intel_framebuffer(fb);
+ return &obj->ttm.base;
+}
-err:
- return ERR_CAST(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ xe_bo_unpin_map_no_vm(gem_to_xe_bo(obj));
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ struct pci_dev *pdev = to_pci_dev(drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 083c6904f8f1..8b0afa270216 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -13,6 +13,8 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
@@ -33,8 +35,12 @@
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
+#include "xe_display_rpm.h"
#include "xe_module.h"
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
+
/* Xe device functions */
/**
@@ -510,6 +516,10 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &xe_display_rpm_interface,
+};
+
/**
* xe_display_probe - probe display and create display struct
* @xe: XE device instance
@@ -530,7 +540,7 @@ int xe_display_probe(struct xe_device *xe)
if (!xe->info.probe_display)
goto no_display;
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return PTR_ERR(display);
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
index 3825376e98cc..340f65884812 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rpm.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -1,73 +1,74 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pm.h"
-static struct xe_device *display_to_xe(struct intel_display *display)
-{
- return to_xe_device(display->drm);
-}
-
-struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get(const struct drm_device *drm)
{
- return intel_display_rpm_get(display);
+ return xe_pm_runtime_resume_and_get(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+static struct ref_tracker *xe_display_rpm_get_if_in_use(const struct drm_device *drm)
{
- intel_display_rpm_put(display, wakeref);
+ return xe_pm_runtime_get_if_in_use(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get_noresume(const struct drm_device *drm)
{
- return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
-{
- return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
-{
- xe_pm_runtime_get_noresume(display_to_xe(display));
+ xe_pm_runtime_get_noresume(to_xe_device(drm));
return INTEL_WAKEREF_DEF;
}
-void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+static void xe_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
{
if (wakeref)
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-void intel_display_rpm_put_unchecked(struct intel_display *display)
+static void xe_display_rpm_put_unchecked(const struct drm_device *drm)
{
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-bool intel_display_rpm_suspended(struct intel_display *display)
+static bool xe_display_rpm_suspended(const struct drm_device *drm)
{
- struct xe_device *xe = display_to_xe(display);
+ struct xe_device *xe = to_xe_device(drm);
return pm_runtime_suspended(xe->drm.dev);
}
-void assert_display_rpm_held(struct intel_display *display)
+static void xe_display_rpm_assert_held(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_block(struct intel_display *display)
+static void xe_display_rpm_assert_block(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_unblock(struct intel_display *display)
+static void xe_display_rpm_assert_unblock(const struct drm_device *drm)
{
/* FIXME */
}
+
+const struct intel_display_rpm_interface xe_display_rpm_interface = {
+ .get = xe_display_rpm_get,
+ .get_raw = xe_display_rpm_get,
+ .get_if_in_use = xe_display_rpm_get_if_in_use,
+ .get_noresume = xe_display_rpm_get_noresume,
+ .put = xe_display_rpm_put,
+ .put_raw = xe_display_rpm_put,
+ .put_unchecked = xe_display_rpm_put_unchecked,
+ .suspended = xe_display_rpm_suspended,
+ .assert_held = xe_display_rpm_assert_held,
+ .assert_block = xe_display_rpm_assert_block,
+ .assert_unblock = xe_display_rpm_assert_unblock
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.h b/drivers/gpu/drm/xe/display/xe_display_rpm.h
new file mode 100644
index 000000000000..0bf9d31e87c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DISPLAY_RPM_H_
+#define _XE_DISPLAY_RPM_H_
+
+extern const struct intel_display_rpm_interface xe_display_rpm_interface;
+
+#endif /* _XE_DISPLAY_RPM_H_ */
diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c
index f32b23338331..df663286092a 100644
--- a/drivers/gpu/drm/xe/display/xe_panic.c
+++ b/drivers/gpu/drm/xe/display/xe_panic.c
@@ -8,20 +8,23 @@
#include "intel_fb.h"
#include "intel_panic.h"
#include "xe_bo.h"
+#include "xe_res_cursor.h"
struct intel_panic {
- struct page **pages;
+ struct xe_res_cursor res;
+ struct iosys_map vmap;
+
int page;
- void *vaddr;
};
static void xe_panic_kunmap(struct intel_panic *panic)
{
- if (panic->vaddr) {
- drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
- kunmap_local(panic->vaddr);
- panic->vaddr = NULL;
+ if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {
+ drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
+ kunmap_local(panic->vmap.vaddr);
}
+ iosys_map_clear(&panic->vmap);
+ panic->page = -1;
}
/*
@@ -46,15 +49,29 @@ static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int
new_page = offset >> PAGE_SHIFT;
offset = offset % PAGE_SIZE;
if (new_page != panic->page) {
- xe_panic_kunmap(panic);
+ if (xe_bo_is_vram(bo)) {
+ /* Display is always mapped on root tile */
+ struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram;
+
+ if (panic->page < 0 || new_page < panic->page) {
+ xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
+ bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
+ } else {
+ xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
+ }
+ iosys_map_set_vaddr_iomem(&panic->vmap,
+ vram->mapping + panic->res.start);
+ } else {
+ xe_panic_kunmap(panic);
+ iosys_map_set_vaddr(&panic->vmap,
+ ttm_bo_kmap_try_from_panic(&bo->ttm,
+ new_page));
+ }
panic->page = new_page;
- panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
- panic->page);
- }
- if (panic->vaddr) {
- u32 *pix = panic->vaddr + offset;
- *pix = color;
}
+
+ if (iosys_map_is_set(&panic->vmap))
+ iosys_map_wr(&panic->vmap, offset, u32, color);
}
struct intel_panic *intel_panic_alloc(void)
@@ -68,6 +85,12 @@ struct intel_panic *intel_panic_alloc(void)
int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+
+ if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo))
+ return -ENODEV;
+
panic->page = -1;
sb->set_pixel = xe_panic_page_set_pixel;
return 0;
@@ -76,5 +99,4 @@ int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
void intel_panic_finish(struct intel_panic *panic)
{
xe_panic_kunmap(panic);
- panic->page = -1;
}
diff --git a/drivers/gpu/drm/xe/display/xe_stolen.c b/drivers/gpu/drm/xe/display/xe_stolen.c
new file mode 100644
index 000000000000..9f04ba36e930
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_stolen.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "gem/i915_gem_stolen.h"
+#include "xe_res_cursor.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_validation.h"
+
+struct intel_stolen_node {
+ struct xe_device *xe;
+ struct xe_bo *bo;
+};
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end)
+{
+ struct xe_device *xe = node->xe;
+
+ struct xe_bo *bo;
+ int err = 0;
+ u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
+
+ if (start < SZ_4K)
+ start = SZ_4K;
+
+ if (align) {
+ size = ALIGN(size, align);
+ start = ALIGN(start, align);
+ }
+
+ bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
+ size, start, end, ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ bo = NULL;
+ return err;
+ }
+
+ node->bo = bo;
+
+ return err;
+}
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, unsigned int align)
+{
+ /* Not used on xe */
+ WARN_ON(1);
+
+ return -ENODEV;
+}
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ xe_bo_unpin_map_no_vm(node->bo);
+ node->bo = NULL;
+}
+
+bool i915_gem_stolen_initialized(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+
+ return ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+}
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return node->bo;
+}
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node)
+{
+ struct xe_res_cursor res;
+
+ xe_res_first(node->bo->ttm.resource, 0, 4096, &res);
+ return res.start;
+}
+
+/* Used for < gen4. These are not supported by Xe */
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+/* Used for gen9 specific WA. Gen9 is not supported by Xe */
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node)
+{
+ struct xe_device *xe = node->xe;
+
+ return xe_ttm_stolen_gpu_offset(xe) + i915_gem_stolen_node_offset(node);
+}
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
+{
+ return node->bo->ttm.base.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->xe = xe;
+
+ return node;
+}
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
+{
+ kfree(node);
+}
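Note: a rough sketch of the node lifecycle as seen from a caller (size, alignment and range are illustrative; the real callers live in the shared i915 display code):

	struct intel_stolen_node *node;
	int ret;

	node = i915_gem_stolen_node_alloc(drm);
	if (!node)
		return -ENOMEM;

	/* Reserve 1 MiB of stolen memory, 4 KiB aligned, anywhere in stolen. */
	ret = i915_gem_stolen_insert_node_in_range(node, SZ_1M, SZ_4K, 0, U64_MAX);
	if (ret) {
		i915_gem_stolen_node_free(node);
		return ret;
	}

	/* ... use i915_gem_stolen_node_address()/_offset() for programming ... */

	i915_gem_stolen_remove_node(node);
	i915_gem_stolen_node_free(node);
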
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index 8cfcd3360896..5d41ca297447 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -31,6 +31,12 @@
#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
#define XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK GENMASK(23, 20)
+#define MEM_COPY_CMD (2 << 29 | 0x5a << 22 | 0x8)
+#define MEM_COPY_PAGE_COPY_MODE REG_BIT(19)
+#define MEM_COPY_MATRIX_COPY REG_BIT(17)
+#define MEM_COPY_SRC_MOCS_INDEX_MASK GENMASK(31, 28)
+#define MEM_COPY_DST_MOCS_INDEX_MASK GENMASK(6, 3)
+
#define PVC_MEM_SET_CMD (2 << 29 | 0x5b << 22)
#define PVC_MEM_SET_CMD_LEN_DW 7
#define PVC_MEM_SET_MATRIX REG_BIT(17)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 228de47c0f3f..a895a8e801a9 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -37,6 +37,12 @@
#define GMD_ID XE_REG(0xd8c)
#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+/*
+ * Spec defines these bits as "Reserved", but then makes them assume some
+ * meaning that depends on the ARCH. To avoid any confusion, call them
+ * SUBIP_FLAG_MASK.
+ */
+#define GMD_ID_SUBIP_FLAG_MASK REG_GENMASK(13, 6)
#define GMD_ID_REVID REG_GENMASK(5, 0)
#define FORCEWAKE_ACK_GSC XE_REG(0xdf8)
@@ -168,6 +174,7 @@
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define FAST_CLEAR_VALIGN_FIX REG_BIT(13)
#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
@@ -544,6 +551,9 @@
#define SARB_CHICKEN1 XE_REG_MCR(0xe90c)
#define COMP_CKN_IN REG_GENMASK(30, 29)
+#define MAIN_GAMCTRL_MODE XE_REG(0xef00)
+#define MAIN_GAMCTRL_QUEUE_SELECT REG_BIT(0)
+
#define RCU_MODE XE_REG(0x14800, XE_REG_OPTION_MASKED)
#define RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define RCU_MODE_CCS_ENABLE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 37b344df2dc3..4d10a7e2b570 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -44,21 +44,27 @@ static void check_media_ip(struct kunit *test)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_platform_gt_count(struct kunit *test)
+static void check_platform_desc(struct kunit *test)
{
const struct pci_device_id *pci = test->param_value;
const struct xe_device_desc *desc =
(const struct xe_device_desc *)pci->driver_data;
- int max_gt = desc->max_gt_per_tile;
- KUNIT_ASSERT_GT(test, max_gt, 0);
- KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
+ KUNIT_EXPECT_GT(test, desc->dma_mask_size, 0);
+
+ KUNIT_EXPECT_GT(test, (unsigned int)desc->max_gt_per_tile, 0);
+ KUNIT_EXPECT_LE(test, (unsigned int)desc->max_gt_per_tile, XE_MAX_GT_PER_TILE);
+
+ KUNIT_EXPECT_GT(test, desc->va_bits, 0);
+ KUNIT_EXPECT_LE(test, desc->va_bits, 64);
+
+ KUNIT_EXPECT_GT(test, desc->vm_max_level, 0);
}
static struct kunit_case xe_pci_tests[] = {
KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
- KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
+ KUNIT_CASE_PARAM(check_platform_desc, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 7b6502081873..b0bd31d14bb9 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -610,6 +610,23 @@ static bool xe_ttm_resource_visible(struct ttm_resource *mem)
return vres->used_visible_size == mem->size;
}
+/**
+ * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM.
+ * @bo: The BO
+ *
+ * This function checks whether a given BO resides entirely in memory visible from the CPU.
+ *
+ * Returns: true if the BO is entirely visible, false otherwise.
+ *
+ */
+bool xe_bo_is_visible_vram(struct xe_bo *bo)
+{
+ if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo)))
+ return false;
+
+ return xe_ttm_resource_visible(bo->ttm.resource);
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -1635,7 +1652,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) {
struct xe_migrate *migrate =
mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
@@ -2105,7 +2122,7 @@ void xe_bo_free(struct xe_bo *bo)
* if the function should allocate a new one.
* @tile: The tile to select for migration of this bo, and the tile used for
* GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
- * @resv: Pointer to a locked shared reservation object to use fo this bo,
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
* or NULL for the xe_bo to use its own.
* @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
* @size: The storage size to use for the bo.
@@ -2259,6 +2276,12 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
struct ttm_place *place = bo->placements;
u32 vram_flag, vram_stolen_flags;
+ /*
+ * To allow fixed placement in the GGTT of a VF, post-migration fixups would have to
+ * include selecting a new fixed offset and shifting the page ranges for it.
+ */
+ xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT));
+
if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
@@ -2629,7 +2652,7 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
* @size: The storage size to use for the bo.
* @type: The TTM buffer object type.
* @flags: XE_BO_FLAG_ flags.
- * @intr: Whether to execut any waits for backing store interruptible.
+ * @intr: Whether to execute any waits for backing store interruptible.
*
* Create a pinned and mapped bo. The bo will be external and not associated
* with a VM.
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 353d607d301d..911d5b90461a 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -274,6 +274,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
+bool xe_bo_is_visible_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index 25a884c64bf1..401e7dd26ef3 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -12,7 +12,7 @@
* BO management
* =============
*
- * TTM manages (placement, eviction, etc...) all BOs in XE.
+ * TTM manages (placement, eviction, etc...) all BOs in Xe.
*
* BO creation
* ===========
@@ -29,7 +29,7 @@
* a kernel BO (e.g. engine state, memory for page tables, etc...). These BOs
* are typically mapped in the GGTT (any kernel BOs aside memory for page tables
* are in the GGTT), are pinned (can't move or be evicted at runtime), have a
- * vmap (XE can access the memory via xe_map layer) and have contiguous physical
+ * vmap (Xe can access the memory via xe_map layer) and have contiguous physical
* memory.
*
* More details of why kernel BOs are pinned and contiguous below.
@@ -40,7 +40,7 @@
* A user BO is created via the DRM_IOCTL_XE_GEM_CREATE IOCTL. Once it is
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
- * user BOs are evictable and user BOs are never pinned by XE. The allocation of
+ * user BOs are evictable and user BOs are never pinned by Xe. The allocation of
* the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
@@ -84,7 +84,7 @@
* ====================
*
* All eviction (or in other words, moving a BO from one memory location to
- * another) is routed through TTM with a callback into XE.
+ * another) is routed through TTM with a callback into Xe.
*
* Runtime eviction
* ----------------
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index c1419a270fa4..9f6251b1008b 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -27,7 +27,7 @@
* Overview
* ========
*
- * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
* configfs subsystem called ``xe`` that creates a directory in the mounted
* configfs directory. The user can create devices under this directory and
* configure them as necessary. See Documentation/filesystems/configfs.rst for
@@ -301,7 +301,6 @@ struct engine_info {
/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2
-#define MAX_GT_TYPE_CHARS 7
static const struct engine_info engine_info[] = {
{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
@@ -313,7 +312,7 @@ static const struct engine_info engine_info[] = {
};
static const struct {
- const char name[MAX_GT_TYPE_CHARS + 1];
+ const char *name;
enum xe_gt_type type;
} gt_types[] = {
{ .name = "primary", .type = XE_GT_TYPE_MAIN },
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 58e7996160a0..86d5960476af 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -1217,7 +1217,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
*
* /sys/bus/pci/devices/<device>/survivability_mode
*
- * - Admin/userpsace consumer can use firmware flashing tools like fwupd to flash
+ * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
* firmware and restore device to normal operation.
*/
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9e3666a226da..af0ce275b032 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -222,12 +222,17 @@ struct xe_tile {
};
/**
- * struct xe_device - Top level struct of XE device
+ * struct xe_device - Top level struct of Xe device
*/
struct xe_device {
/** @drm: drm device */
struct drm_device drm;
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+ /** @display: display device data, must be placed after drm device member */
+ struct intel_display *display;
+#endif
+
/** @devcoredump: device coredump */
struct xe_devcoredump devcoredump;
@@ -245,9 +250,9 @@ struct xe_device {
u32 media_verx100;
/** @info.mem_region_mask: mask of valid memory regions */
u32 mem_region_mask;
- /** @info.platform: XE platform enum */
+ /** @info.platform: Xe platform enum */
enum xe_platform platform;
- /** @info.subplatform: XE subplatform enum */
+ /** @info.subplatform: Xe subplatform enum */
enum xe_subplatform subplatform;
/** @info.devid: device ID */
u16 devid;
@@ -300,6 +305,8 @@ struct xe_device {
* pcode mailbox commands.
*/
u8 has_mbx_power_limits:1;
+ /** @info.has_mem_copy_instr: Device supports MEM_COPY instruction */
+ u8 has_mem_copy_instr:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
/** @info.has_range_tlb_inval: Has range based TLB invalidations */
@@ -630,8 +637,6 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display *display;
-
const struct dram_info *dram_info;
/*
@@ -643,23 +648,14 @@ struct xe_device {
/* To shut up runtime pm macros.. */
struct xe_runtime_pm {} runtime_pm;
- /* only to allow build, not used functionally */
- u32 irq_mask;
-
struct intel_uncore {
spinlock_t lock;
} uncore;
-
- /* only to allow build, not used functionally */
- struct {
- unsigned int hpll_freq;
- unsigned int czclk_freq;
- };
#endif
};
/**
- * struct xe_file - file handle for XE driver
+ * struct xe_file - file handle for Xe driver
*/
struct xe_file {
/** @xe: xe DEVICE **/
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 0dc27476832b..521467d976f7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -33,7 +33,7 @@
* - Binding at exec time
* - Flow controlling the ring at exec time
*
- * In XE we avoid all of this complication by not allowing a BO list to be
+ * In Xe we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
* separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h
index 899fbbcb3ea9..12d6e2367455 100644
--- a/drivers/gpu/drm/xe/xe_force_wake_types.h
+++ b/drivers/gpu/drm/xe/xe_force_wake_types.h
@@ -52,7 +52,7 @@ enum xe_force_wake_domains {
};
/**
- * struct xe_force_wake_domain - XE force wake domains
+ * struct xe_force_wake_domain - Xe force wake domains
*/
struct xe_force_wake_domain {
/** @id: domain force wake id */
@@ -70,7 +70,7 @@ struct xe_force_wake_domain {
};
/**
- * struct xe_force_wake - XE force wake
+ * struct xe_force_wake - Xe force wake
*/
struct xe_force_wake {
/** @gt: back pointers to GT */
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 40680f0c49a1..20d226d90c50 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -312,6 +312,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
+
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d8e94fb8b9bd..89808b33d0a8 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -818,17 +818,19 @@ static int gt_reset(struct xe_gt *gt)
unsigned int fw_ref;
int err;
- if (xe_device_wedged(gt_to_xe(gt)))
- return -ECANCELED;
+ if (xe_device_wedged(gt_to_xe(gt))) {
+ err = -ECANCELED;
+ goto err_pm_put;
+ }
/* We only support GT resets with GuC submission */
- if (!xe_device_uc_enabled(gt_to_xe(gt)))
- return -ENODEV;
+ if (!xe_device_uc_enabled(gt_to_xe(gt))) {
+ err = -ENODEV;
+ goto err_pm_put;
+ }
xe_gt_info(gt, "reset started\n");
- xe_pm_runtime_get(gt_to_xe(gt));
-
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
@@ -875,6 +877,7 @@ err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
xe_pm_runtime_put(gt_to_xe(gt));
return err;
@@ -896,7 +899,9 @@ void xe_gt_reset_async(struct xe_gt *gt)
return;
xe_gt_info(gt, "reset queued\n");
- queue_work(gt->ordered_wq, &gt->reset.worker);
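+	/* Hold a PM ref for the reset work; drop it immediately if already queued */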
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+ if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+ xe_pm_runtime_put(gt_to_xe(gt));
}
void xe_gt_suspend_prepare(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 701349251bbc..e88f113226bc 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -36,7 +36,7 @@
* - act_freq: The actual resolved frequency decided by PCODE.
* - cur_freq: The current one requested by GuC PC to the PCODE.
* - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
- * - rpa_freq: The Render Performance (RP) A level, which is the achiveable one.
+ * - rpa_freq: The Render Performance (RP) A level, which is the achievable one.
* Calculated by PCODE at runtime based on multiple running conditions
* - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
* Calculated by PCODE at runtime based on multiple running conditions
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 81ecd9382635..164010860664 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -268,13 +268,14 @@ static const struct xe_mmio_range xe3p_xpc_gam_grp1_steering_table[] = {
{},
};
-static const struct xe_mmio_range xe3p_xpc_psmi_grp19_steering_table[] = {
- { 0x00B500, 0x00B5FF },
+static const struct xe_mmio_range xe3p_xpc_node_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
{},
};
static const struct xe_mmio_range xe3p_xpc_instance0_steering_table[] = {
- { 0x00B600, 0x00B6FF }, /* PSMI0 */
+ { 0x00B500, 0x00B6FF }, /* PSMI */
{ 0x00C800, 0x00CFFF }, /* GAMCTRL */
{ 0x00F000, 0x00F0FF }, /* GAMCTRL */
{},
@@ -282,9 +283,22 @@ static const struct xe_mmio_range xe3p_xpc_instance0_steering_table[] = {
static void init_steering_l3bank(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
struct xe_mmio *mmio = &gt->mmio;
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ if (GRAPHICS_VER(xe) >= 35) {
+ unsigned int first_bank = xe_l3_bank_mask_ffs(gt->fuse_topo.l3_bank_mask);
+ const int banks_per_node = 4;
+ unsigned int node = first_bank / banks_per_node;
+
+ /* L3BANK ranges place node in grpID, bank in instanceid */
+ gt->steering[L3BANK].group_target = node;
+ gt->steering[L3BANK].instance_target = first_bank % banks_per_node;
+
+ /* NODE ranges split the node across grpid and instanceid */
+ gt->steering[NODE].group_target = node >> 1;
+ gt->steering[NODE].instance_target = node & 1;
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
@@ -297,7 +311,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
gt->steering[L3BANK].group_target = __ffs(mslice_mask);
gt->steering[L3BANK].instance_target =
bank_mask & BIT(0) ? 0 : 2;
- } else if (gt_to_xe(gt)->info.platform == XE_DG2) {
+ } else if (xe->info.platform == XE_DG2) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank = __ffs(mslice_mask) * 8;
@@ -452,12 +466,6 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
-static void init_steering_psmi(struct xe_gt *gt)
-{
- gt->steering[PSMI19].group_target = 19;
- gt->steering[PSMI19].instance_target = 0;
-}
-
static void init_steering_gam1(struct xe_gt *gt)
{
gt->steering[GAM1].group_target = 1;
@@ -469,12 +477,12 @@ static const struct {
void (*init)(struct xe_gt *gt);
} xe_steering_types[] = {
[L3BANK] = { "L3BANK", init_steering_l3bank },
+ [NODE] = { "NODE", NULL }, /* initialized by l3bank init */
[MSLICE] = { "MSLICE", init_steering_mslice },
[LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
[DSS] = { "DSS / XeCore", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
- [PSMI19] = { "PSMI[19]", init_steering_psmi },
[GAM1] = { "GAMWKRS / STLB / GAMREQSTRM", init_steering_gam1 },
[INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
@@ -524,7 +532,8 @@ void xe_gt_mcr_init_early(struct xe_gt *gt)
gt->steering[DSS].ranges = xe3p_xpc_xecore_steering_table;
gt->steering[GAM1].ranges = xe3p_xpc_gam_grp1_steering_table;
gt->steering[INSTANCE0].ranges = xe3p_xpc_instance0_steering_table;
- gt->steering[PSMI19].ranges = xe3p_xpc_psmi_grp19_steering_table;
+ gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
+ gt->steering[NODE].ranges = xe3p_xpc_node_steering_table;
} else if (GRAPHICS_VER(xe) >= 20) {
gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index c4dda87b47cc..0714c758b9c1 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -158,39 +158,19 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
xe_gt_sriov_pf_service_update(gt);
}
-static u32 pf_get_vf_regs_stride(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
-}
-
-static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
-{
- struct xe_reg pf_reg = vf_reg;
-
- pf_reg.vf = 0;
- pf_reg.addr += stride * vfid;
-
- return pf_reg;
-}
-
static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
- u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
- struct xe_reg scratch;
- int n, count;
+ struct xe_mmio mmio;
+ int n;
+
+ xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
if (xe_gt_is_media_type(gt)) {
- count = MED_VF_SW_FLAG_COUNT;
- for (n = 0; n < count; n++) {
- scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
- xe_mmio_write32(&gt->mmio, scratch, 0);
- }
+ for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), 0);
} else {
- count = VF_SW_FLAG_COUNT;
- for (n = 0; n < count; n++) {
- scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
- xe_mmio_write32(&gt->mmio, scratch, 0);
- }
+ for (n = 0; n < VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, VF_SW_FLAG(n), 0);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 2e6bd3d1fe1d..9de05db1f090 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -997,6 +997,8 @@ static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START);
+
+ xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 46518e629ba3..4c73a077d314 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -31,7 +31,6 @@
#include "xe_lrc.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
-#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
#include "xe_sriov_vf_ccs.h"
@@ -739,7 +738,7 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
- smp_wmb(); /* Ensure above writes visable before wake */
+ smp_wmb(); /* Ensure above writes visible before wake */
xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
@@ -1218,7 +1217,6 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
xe_gt_sriov_dbg(gt, "migration recovery in progress\n");
- xe_pm_runtime_get(xe);
retry = vf_post_migration_shutdown(gt);
if (retry)
goto queue;
@@ -1241,12 +1239,10 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
vf_post_migration_kickstart(gt);
- xe_pm_runtime_put(xe);
xe_gt_sriov_notice(gt, "migration recovery ended\n");
return;
fail:
vf_post_migration_abort(gt);
- xe_pm_runtime_put(xe);
xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(xe);
return;
@@ -1254,7 +1250,6 @@ fail:
queue:
xe_gt_sriov_info(gt, "Re-queuing migration recovery\n");
queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
- xe_pm_runtime_put(xe);
}
static void migration_worker_func(struct work_struct *w)
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 1e0516ba7422..bd5260221d8d 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -309,6 +309,13 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
+/* Used to obtain the index of the first L3 bank. */
+unsigned int
+xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask)
+{
+ return find_first_bit(mask, XE_MAX_L3_BANK_MASK_BITS);
+}
+
/**
* xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
* @gt: GT to check
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index 3ff40f44bf2a..162d603c9b81 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -40,6 +40,8 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
+unsigned int
+xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask);
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index d93faa1eedef..0b525643a048 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -66,6 +66,7 @@ struct xe_mmio_range {
*/
enum xe_steering_type {
L3BANK,
+ NODE,
MSLICE,
LNCF,
DSS,
@@ -73,14 +74,6 @@ enum xe_steering_type {
SQIDI_PSMI,
/*
- * The bspec lists multiple ranges as "PSMI," but the different
- * ranges with that label have different grpid steering values so we
- * treat them independently in code. Note that the ranges with grpid=0
- * are included in the INSTANCE0 group above.
- */
- PSMI19,
-
- /*
* Although most GAM ranges must be steered to (0,0) and thus use the
* INSTANCE0 type farther down, some platforms have special rules
* for specific subtypes that require steering to (1,0) instead.
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index d94490979adc..ecc3e091b89e 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -91,6 +91,9 @@ static u32 guc_ctl_feature_flags(struct xe_guc *guc)
if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
flags |= GUC_CTL_ENABLE_PSMI_LOGGING;
+ if (xe_guc_using_main_gamctrl_queues(guc))
+ flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;
+
return flags;
}
@@ -1255,8 +1258,13 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
int xe_guc_upload(struct xe_guc *guc)
{
+ struct xe_gt *gt = guc_to_gt(guc);
+
xe_guc_ads_populate(&guc->ads);
+ if (xe_guc_using_main_gamctrl_queues(guc))
+ xe_mmio_write32(&gt->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT);
+
return __xe_guc_upload(guc);
}
@@ -1657,6 +1665,44 @@ void xe_guc_declare_wedged(struct xe_guc *guc)
xe_guc_submit_wedge(guc);
}
+/**
+ * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use.
+ * @guc: The GuC object
+ *
+ * For Xe3p and beyond, we want to program the hardware to use the
+ * "Main GAMCTRL queue" rather than the legacy queue before we upload
+ * the GuC firmware. This will allow the GuC to use a new set of
+ * registers for pagefault handling and avoid some unnecessary
+ * complications with MCR register range handling.
+ *
+ * Return: true if the new main GAMCTRL queues can be used.
+ */
+bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ /*
+ * For the Xe3p media GT (35), the GuC and the CS subunits may still be Xe3,
+ * which lacks the Main GAMCTRL support. The reserved bits in GMD_ID
+ * indicate the IP version of the subunits.
+ */
+ if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) {
+ u32 val = xe_mmio_read32(&gt->mmio, GMD_ID);
+ u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val);
+
+ if (!subip)
+ return true;
+
+ xe_gt_WARN(gt, subip != 1,
+ "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n",
+ subip);
+
+ return false;
+ }
+
+ return GT_VER(gt) >= 35;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_g2g_test.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 1cca05967e62..e2d4c5f44ae3 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -52,6 +52,7 @@ void xe_guc_stop_prepare(struct xe_guc *guc);
void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
void xe_guc_declare_wedged(struct xe_guc *guc);
+bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc);
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 22ac2a8b74c8..bcb85a1bf26d 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -820,16 +820,20 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
static void guc_um_init_params(struct xe_guc_ads *ads)
{
u32 um_queue_offset = guc_ads_um_queues_offset(ads);
+ struct xe_guc *guc = ads_to_guc(ads);
u64 base_dpa;
u32 base_ggtt;
+ bool with_dpa;
int i;
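+	/* The DPA base is only used by the legacy queues; zeroed when the main GAMCTRL queues are in use */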
+ with_dpa = !xe_guc_using_main_gamctrl_queues(guc);
+
base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;
for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
- base_dpa + (i * GUC_UM_QUEUE_SIZE));
+ with_dpa ? (base_dpa + (i * GUC_UM_QUEUE_SIZE)) : 0);
ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
base_ggtt + (i * GUC_UM_QUEUE_SIZE));
ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 70c132458ac3..48a8e092023f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -14,7 +14,7 @@ struct xe_bo;
* struct xe_guc_ads - GuC additional data structures (ADS)
*/
struct xe_guc_ads {
- /** @bo: XE BO for GuC ads blob */
+ /** @bo: Xe BO for GuC ads blob */
struct xe_bo *bo;
/** @golden_lrc_size: golden LRC size */
size_t golden_lrc_size;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8b03b50313d9..09d7ff1ef42a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -126,7 +126,7 @@ struct xe_fast_req_fence {
* for the H2G and G2H requests sent and received through the buffers.
*/
struct xe_guc_ct {
- /** @bo: XE BO for CT */
+ /** @bo: Xe BO for CT */
struct xe_bo *bo;
/** @lock: protects everything in CT layer */
struct mutex lock;
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 50c4c2406132..c90dd266e9cf 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -113,6 +113,7 @@ struct guc_update_exec_queue_policy {
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
#define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7)
+#define GUC_CTL_MAIN_GAMCTRL_QUEUES BIT(9)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h
index b3d5c72ac752..02851b924aa4 100644
--- a/drivers/gpu/drm/xe/xe_guc_log_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_log_types.h
@@ -44,7 +44,7 @@ struct xe_guc_log_snapshot {
struct xe_guc_log {
/** @level: GuC log level */
u32 level;
- /** @bo: XE BO for GuC log */
+ /** @bo: Xe BO for GuC log */
struct xe_bo *bo;
/** @stats: logging related stats */
struct {
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 0ef67d3523a7..d4ffdb71ef3d 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1920,7 +1920,7 @@ static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
}
/*
- * All of these functions are an abstraction layer which other parts of XE can
+ * All of these functions are an abstraction layer which other parts of Xe can
* use to trap into the GuC backend. All of these functions, aside from init,
* really shouldn't do much other than trap into the DRM scheduler which
* synchronizes these operations.
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
index 6bf2103602f8..a80175c7c478 100644
--- a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
@@ -207,7 +207,7 @@ static const struct xe_tlb_inval_ops guc_tlb_inval_ops = {
* @guc: GuC object
* @tlb_inval: TLB invalidation client
*
- * Inititialize GuC TLB invalidation by setting back pointer in TLB invalidation
+ * Initialize GuC TLB invalidation by setting back pointer in TLB invalidation
* client to the GuC and setting GuC backend ops.
*/
void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index f62e0c8b67ab..c44777125691 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -14,9 +14,9 @@
* DOC: Map layer
*
* All access to any memory shared with a device (both sysmem and vram) in the
- * XE driver should go through this layer (xe_map). This layer is built on top
+ * Xe driver should go through this layer (xe_map). This layer is built on top
* of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
- * and with extra hooks into the XE driver that allows adding asserts to memory
+ * and with extra hooks into the Xe driver that allows adding asserts to memory
* accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
*/
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 3112c966c67d..56a5804726e9 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -699,9 +699,9 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
}
#define EMIT_COPY_DW 10
-static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
- u64 src_ofs, u64 dst_ofs, unsigned int size,
- unsigned int pitch)
+static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u64 dst_ofs, unsigned int size,
+ unsigned int pitch)
{
struct xe_device *xe = gt_to_xe(gt);
u32 mocs = 0;
@@ -730,6 +730,61 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
bb->cs[bb->len++] = upper_32_bits(src_ofs);
}
+#define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
+static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u64 dst_ofs, unsigned int size, unsigned int pitch)
+{
+ u32 mode, copy_type, width;
+
+ xe_gt_assert(gt, IS_ALIGNED(size, pitch));
+ xe_gt_assert(gt, pitch <= U16_MAX);
+ xe_gt_assert(gt, pitch);
+ xe_gt_assert(gt, size);
+
+ if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
+ IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
+ IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
+ mode = MEM_COPY_PAGE_COPY_MODE;
+ copy_type = 0; /* linear copy */
+ width = size / PAGE_COPY_MODE_PS;
+ } else if (pitch > 1) {
+ xe_gt_assert(gt, size / pitch <= U16_MAX);
+ mode = 0; /* BYTE_COPY */
+ copy_type = MEM_COPY_MATRIX_COPY;
+ width = pitch;
+ } else {
+ mode = 0; /* BYTE_COPY */
+ copy_type = 0; /* linear copy */
+ width = size;
+ }
+
+ xe_gt_assert(gt, width <= U16_MAX);
+
+ bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
+ bb->cs[bb->len++] = width - 1;
+ bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
+ bb->cs[bb->len++] = pitch - 1;
+ bb->cs[bb->len++] = pitch - 1;
+ bb->cs[bb->len++] = lower_32_bits(src_ofs);
+ bb->cs[bb->len++] = upper_32_bits(src_ofs);
+ bb->cs[bb->len++] = lower_32_bits(dst_ofs);
+ bb->cs[bb->len++] = upper_32_bits(dst_ofs);
+ bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
+ FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
+}
+
+static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
+ u64 src_ofs, u64 dst_ofs, unsigned int size,
+ unsigned int pitch)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe->info.has_mem_copy_instr)
+ emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
+ else
+ emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
+}
+
static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
@@ -847,7 +902,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
&ccs_it);
while (size) {
- u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
u32 flush_flags = 0;
@@ -1312,7 +1367,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
/* Calculate final sizes and batch size.. */
pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
- batch_size = 2 +
+ batch_size = 1 +
pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
@@ -1798,11 +1853,15 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
u32 ptes;
int i = 0;
+ xe_tile_assert(m->tile, PAGE_ALIGNED(size));
+
ptes = DIV_ROUND_UP(size, gpu_page_size);
while (ptes) {
u32 chunk = min(MAX_PTE_PER_SDI, ptes);
- chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
+ if (!level)
+ chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
+
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = pt_offset;
bb->cs[bb->len++] = 0;
@@ -1811,12 +1870,13 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes -= chunk;
while (chunk--) {
- u64 addr = sram_addr[i].addr & ~(gpu_page_size - 1);
- u64 pte, orig_addr = addr;
+ u64 addr = sram_addr[i].addr;
+ u64 pte;
xe_tile_assert(m->tile, sram_addr[i].proto ==
DRM_INTERCONNECT_SYSTEM);
xe_tile_assert(m->tile, addr);
+ xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
again:
pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
@@ -1827,7 +1887,7 @@ again:
if (gpu_page_size < PAGE_SIZE) {
addr += XE_PAGE_SIZE;
- if (orig_addr + PAGE_SIZE != addr) {
+ if (!PAGE_ALIGNED(addr)) {
chunk--;
goto again;
}
@@ -1860,6 +1920,25 @@ enum xe_migrate_copy_dir {
#define XE_CACHELINE_BYTES 64ull
#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
+{
+ u32 pitch;
+
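+	/*
+	 * Pick the largest supported alignment of len as the copy pitch; a byte
+	 * pitch of 1 is only usable when the MEM_COPY instruction is available,
+	 * as asserted below.
+	 */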
+ if (IS_ALIGNED(len, PAGE_SIZE))
+ pitch = PAGE_SIZE;
+ else if (IS_ALIGNED(len, SZ_4K))
+ pitch = SZ_4K;
+ else if (IS_ALIGNED(len, SZ_256))
+ pitch = SZ_256;
+ else if (IS_ALIGNED(len, 4))
+ pitch = 4;
+ else
+ pitch = 1;
+
+ xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
+ return pitch;
+}
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long len,
unsigned long sram_offset,
@@ -1871,25 +1950,25 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
struct xe_device *xe = gt_to_xe(gt);
bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
- u32 batch_size = 2;
+ u32 batch_size = 1;
u64 src_L0_ofs, dst_L0_ofs;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
- unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
- PAGE_SIZE : 4;
+ unsigned int pitch = xe_migrate_copy_pitch(xe, len);
int err;
unsigned long i, j;
bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
- if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
- (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ if (!xe->info.has_mem_copy_instr &&
+ drm_WARN_ON(&xe->drm,
+ (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
return ERR_PTR(-EOPNOTSUPP);
xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
- batch_size += pte_update_cmd_size(len);
+ batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
batch_size += EMIT_COPY_DW;
bb = xe_bb_new(gt, batch_size, use_usm_batch);
@@ -1918,10 +1997,10 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
if (use_pde)
build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
- sram_addr, len + sram_offset, 1);
+ sram_addr, npages << PAGE_SHIFT, 1);
else
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, len + sram_offset, 0);
+ sram_addr, npages << PAGE_SHIFT, 0);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
if (use_pde)
@@ -1981,7 +2060,7 @@ err:
*
* Copy from an array dma addresses to a VRAM device physical address
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
@@ -2002,7 +2081,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
*
* Copy from a VRAM device physical address to an array dma addresses
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
@@ -2103,8 +2182,10 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
xe_bo_assert_held(bo);
/* Use bounce buffer for small access and unaligned access */
- if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
- !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
+ if (!xe->info.has_mem_copy_instr &&
+ (!IS_ALIGNED(len, 4) ||
+ !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
+ !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
int buf_offset = 0;
void *bounce;
int err;
@@ -2166,6 +2247,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
cursor.start;
int current_bytes;
+ u32 pitch;
if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
current_bytes = min_t(int, bytes_left,
@@ -2173,13 +2255,13 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
- if (current_bytes & ~PAGE_MASK) {
- int pitch = 4;
-
+ pitch = xe_migrate_copy_pitch(xe, current_bytes);
+ if (xe->info.has_mem_copy_instr)
+ current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
+ else
current_bytes = min_t(int, current_bytes,
round_down(S16_MAX * pitch,
XE_CACHELINE_BYTES));
- }
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
diff --git a/drivers/gpu/drm/xe/xe_migrate_doc.h b/drivers/gpu/drm/xe/xe_migrate_doc.h
index 63c7d67b5b62..c082bc0b7068 100644
--- a/drivers/gpu/drm/xe/xe_migrate_doc.h
+++ b/drivers/gpu/drm/xe/xe_migrate_doc.h
@@ -9,7 +9,7 @@
/**
* DOC: Migrate Layer
*
- * The XE migrate layer is used generate jobs which can copy memory (eviction),
+ * The Xe migrate layer is used generate jobs which can copy memory (eviction),
* clear memory, or program tables (binds). This layer exists in every GT, has
* a migrate engine, and uses a special VM for all generated jobs.
*
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index ef6f3ea573a2..350dca1f0925 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -379,3 +379,32 @@ int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 va
{
return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}
+
+#ifdef CONFIG_PCI_IOV
+static size_t vf_regs_stride(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
+}
+
+/**
+ * xe_mmio_init_vf_view() - Initialize an MMIO instance that provides the VF's view of the registers
+ * @mmio: the target &xe_mmio to initialize as VF's view
+ * @base: the source &xe_mmio to initialize from
+ * @vfid: the VF identifier
+ */
+void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid)
+{
+ struct xe_tile *tile = base->tile;
+ struct xe_device *xe = tile->xe;
+ size_t offset = vf_regs_stride(xe) * vfid;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid);
+ xe_assert(xe, !base->sriov_vf_gt);
+ xe_assert(xe, base->regs_size > offset);
+
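+	/* Clone the PF view, then shift the register window to this VF's copy */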
+ *mmio = *base;
+ mmio->regs += offset;
+ mmio->regs_size -= offset;
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index c151ba569003..15362789ab99 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -42,4 +42,8 @@ static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
return &xe->tiles[0].mmio;
}
+#ifdef CONFIG_PCI_IOV
+void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index e8ec4114302e..6613d3b48a84 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -568,6 +568,23 @@ static const struct xe_mocs_ops xe2_mocs_ops = {
.dump = xe2_mocs_dump,
};
+/*
+ * Note that the "L3" and "L4" register fields actually control the L2 and L3
+ * caches respectively on this platform.
+ */
+static const struct xe_mocs_entry xe3p_xpc_mocs_table[] = {
+ /* Defer to PAT */
+ MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
+ /* UC */
+ MOCS_ENTRY(1, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
+ /* L2 */
+ MOCS_ENTRY(2, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
+ /* L3 */
+ MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
+ /* L2 + L3 */
+ MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
+};
+
static unsigned int get_mocs_settings(struct xe_device *xe,
struct xe_mocs_info *info)
{
@@ -576,6 +593,15 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
memset(info, 0, sizeof(struct xe_mocs_info));
switch (xe->info.platform) {
+ case XE_CRESCENTISLAND:
+ info->ops = &xe2_mocs_ops;
+ info->table_size = ARRAY_SIZE(xe3p_xpc_mocs_table);
+ info->table = xe3p_xpc_mocs_table;
+ info->num_mocs_regs = XE2_NUM_MOCS_ENTRIES;
+ info->uc_index = 1;
+ info->wb_index = 4;
+ info->unused_entries_index = 4;
+ break;
case XE_NOVALAKE_S:
case XE_PANTHERLAKE:
case XE_LUNARLAKE:
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index c326430e75b5..90e2ee5e9270 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -342,6 +342,7 @@ static const struct xe_device_desc lnl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_pxp = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.va_bits = 48,
@@ -362,6 +363,7 @@ static const struct xe_device_desc bmg_desc = {
.has_heci_cscfi = 1,
.has_late_bind = true,
.has_sriov = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -378,6 +380,7 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_sriov = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.needs_shared_vf_gt_wq = true,
@@ -390,12 +393,27 @@ static const struct xe_device_desc nvls_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_flat_ccs = 1,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.require_force_probe = true,
.va_bits = 48,
.vm_max_level = 4,
};
+static const struct xe_device_desc cri_desc = {
+ DGFX_FEATURES,
+ PLATFORM(CRESCENTISLAND),
+ .dma_mask_size = 52,
+ .has_display = false,
+ .has_flat_ccs = false,
+ .has_mbx_power_limits = true,
+ .has_sriov = true,
+ .max_gt_per_tile = 2,
+ .require_force_probe = true,
+ .va_bits = 57,
+ .vm_max_level = 4,
+};
+
#undef PLATFORM
__diag_pop();
@@ -423,6 +441,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc),
+ INTEL_CRI_IDS(INTEL_PCI_DEVICE, &cri_desc),
+ INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -655,6 +675,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
desc->has_sriov;
+ xe->info.has_mem_copy_instr = desc->has_mem_copy_instr;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index a4451bdc79fb..9892c063a9c5 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -46,6 +46,7 @@ struct xe_device_desc {
u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
+ u8 has_mem_copy_instr:1;
u8 has_pxp:1;
u8 has_sriov:1;
u8 needs_scratch:1;
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index 78286285c249..f516dbddfd88 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -25,6 +25,7 @@ enum xe_platform {
XE_BATTLEMAGE,
XE_PANTHERLAKE,
XE_NOVALAKE_S,
+ XE_CRESCENTISLAND,
};
enum xe_subplatform {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 53507e09f7bc..7b089e6fb63f 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -102,7 +102,7 @@ static void xe_pm_block_end_signalling(void)
/**
* xe_pm_might_block_on_suspend() - Annotate that the code might block on suspend
*
- * Annotation to use where the code might block or sieze to make
+ * Annotation to use where the code might block or seize to make
* progress pending resume completion.
*/
void xe_pm_might_block_on_suspend(void)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
index 312c3372a49f..ac125c697a41 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
@@ -12,7 +12,7 @@
struct xe_exec_queue;
/**
- * struct xe_preempt_fence - XE preempt fence
+ * struct xe_preempt_fence - Xe preempt fence
*
* hardware and triggers a callback once the xe_engine is complete.
*/
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index d22fd1ccc0ba..7c5bca78c8bf 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -715,7 +715,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.vm = vm,
.tile = tile,
.curs = &curs,
- .va_curs_start = range ? range->base.itree.start :
+ .va_curs_start = range ? xe_svm_range_start(range) :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
@@ -734,7 +734,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.pages.dma_addr, 0,
- range->base.itree.last + 1 - range->base.itree.start,
+ xe_svm_range_size(range),
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
@@ -778,8 +778,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
- range ? range->base.itree.start : xe_vma_start(vma),
- range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ range ? xe_svm_range_start(range) : xe_vma_start(vma),
+ range ? xe_svm_range_end(range) : xe_vma_end(vma),
&xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
@@ -975,8 +975,8 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
if (!(pt_mask & BIT(tile->id)))
return false;
- (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
- range->base.itree.last + 1, &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
+ xe_svm_range_end(range), &xe_walk.base);
return xe_walk.needs_invalidate;
}
@@ -1661,8 +1661,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
- u64 start = range ? range->base.itree.start : xe_vma_start(vma);
- u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
+ u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
+ u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
@@ -1872,7 +1872,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing bind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = NULL;
pt_op->bind = true;
@@ -1887,8 +1887,8 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->num_entries, true);
xe_pt_update_ops_rfence_interval(pt_update_ops,
- range->base.itree.start,
- range->base.itree.last + 1);
+ xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
@@ -1983,7 +1983,7 @@ static int unbind_range_prepare(struct xe_vm *vm,
vm_dbg(&vm->xe->drm,
"Preparing unbind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = XE_INVALID_VMA;
pt_op->bind = false;
@@ -1994,8 +1994,8 @@ static int unbind_range_prepare(struct xe_vm *vm,
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
- range->base.itree.last + 1);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
diff --git a/drivers/gpu/drm/xe/xe_range_fence.h b/drivers/gpu/drm/xe/xe_range_fence.h
index edd58b34f5c0..4934729dd904 100644
--- a/drivers/gpu/drm/xe/xe_range_fence.h
+++ b/drivers/gpu/drm/xe/xe_range_fence.h
@@ -13,13 +13,13 @@
struct xe_range_fence_tree;
struct xe_range_fence;
-/** struct xe_range_fence_ops - XE range fence ops */
+/** struct xe_range_fence_ops - Xe range fence ops */
struct xe_range_fence_ops {
/** @free: free range fence op */
void (*free)(struct xe_range_fence *rfence);
};
-/** struct xe_range_fence - XE range fence (address conflict tracking) */
+/** struct xe_range_fence - Xe range fence (address conflict tracking) */
struct xe_range_fence {
/** @rb: RB tree node inserted into interval tree */
struct rb_node rb;
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index d21bf8f26964..6ae4cc6a3802 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -160,11 +160,11 @@ err_free:
}
/**
- * xe_sched_job_destroy - Destroy XE schedule job
- * @ref: reference to XE schedule job
+ * xe_sched_job_destroy - Destroy Xe schedule job
+ * @ref: reference to Xe schedule job
*
* Called when ref == 0, drop a reference to job's xe_engine + fence, cleanup
- * base DRM schedule job, and free memory for XE schedule job.
+ * base DRM schedule job, and free memory for Xe schedule job.
*/
void xe_sched_job_destroy(struct kref *ref)
{
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index 3dc72c5c1f13..b467131b6d5f 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -23,10 +23,10 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
void xe_sched_job_destroy(struct kref *ref);
/**
- * xe_sched_job_get - get reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_get - get reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Increment XE schedule job's reference count
+ * Increment Xe schedule job's reference count
*/
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
@@ -35,10 +35,10 @@ static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
}
/**
- * xe_sched_job_put - put reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_put - put reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
+ * Decrement Xe schedule job's reference count, call xe_sched_job_destroy when
* reference count == 0.
*/
static inline void xe_sched_job_put(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 13e7a12b03ad..d26612abb4ca 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -32,7 +32,7 @@ struct xe_job_ptrs {
};
/**
- * struct xe_sched_job - XE schedule job (batch buffer tracking)
+ * struct xe_sched_job - Xe schedule job (batch buffer tracking)
*/
struct xe_sched_job {
/** @drm: base DRM scheduler job */
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index 911d5720917b..39c829daa97c 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -130,10 +130,15 @@
bool xe_sriov_vf_migration_supported(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_VF(xe));
- return xe->sriov.vf.migration.enabled;
+ return !xe->sriov.vf.migration.disabled;
}
-static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
+/**
+ * xe_sriov_vf_migration_disable - Turn off VF migration, logging the given message.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, combined with the variadic arguments that follow.
+ */
+void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...)
{
struct va_format vaf;
va_list va_args;
@@ -146,7 +151,7 @@ static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf);
va_end(va_args);
- xe->sriov.vf.migration.enabled = false;
+ xe->sriov.vf.migration.disabled = true;
}
static void vf_migration_init_early(struct xe_device *xe)
@@ -156,25 +161,12 @@ static void vf_migration_init_early(struct xe_device *xe)
* supported at production quality.
*/
if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
- return vf_disable_migration(xe,
- "experimental feature not available on production builds");
-
- if (GRAPHICS_VER(xe) < 20)
- return vf_disable_migration(xe, "requires gfx version >= 20, but only %u found",
- GRAPHICS_VER(xe));
+ return xe_sriov_vf_migration_disable(xe,
+ "experimental feature not available on production builds");
- if (!IS_DGFX(xe)) {
- struct xe_uc_fw_version guc_version;
+ if (!xe_device_has_memirq(xe))
+ return xe_sriov_vf_migration_disable(xe, "requires memory-based IRQ support");
- xe_gt_sriov_vf_guc_versions(xe_device_get_gt(xe, 0), NULL, &guc_version);
- if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0))
- return vf_disable_migration(xe,
- "CCS migration requires GuC ABI >= 1.23 but only %u.%u found",
- guc_version.major, guc_version.minor);
- }
-
- xe->sriov.vf.migration.enabled = true;
- xe_sriov_dbg(xe, "migration support enabled\n");
}
/**
@@ -196,12 +188,7 @@ void xe_sriov_vf_init_early(struct xe_device *xe)
*/
int xe_sriov_vf_init_late(struct xe_device *xe)
{
- int err = 0;
-
- if (xe_sriov_vf_migration_supported(xe))
- err = xe_sriov_vf_ccs_init(xe);
-
- return err;
+ return xe_sriov_vf_ccs_init(xe);
}
static int sa_info_vf_ccs(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 4df95266b261..e967d4166a43 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -14,6 +14,7 @@ struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
int xe_sriov_vf_init_late(struct xe_device *xe);
bool xe_sriov_vf_migration_supported(struct xe_device *xe);
+void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...);
void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 790249801364..797a4b866226 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -10,6 +10,8 @@
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_exec_queue_types.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_guc.h"
#include "xe_guc_submit.h"
#include "xe_lrc.h"
#include "xe_migrate.h"
@@ -260,6 +262,45 @@ int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
return err;
}
+/*
+ * Whether GuC requires CCS copy BBs for VF migration.
+ * @xe: the &xe_device instance.
+ *
+ * Only selected platforms require VF KMD to maintain CCS copy BBs and linked LRCAs.
+ *
+ * Return: true if VF driver must participate in the CCS migration, false otherwise.
+ */
+static bool vf_migration_ccs_bb_needed(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ return !IS_DGFX(xe) && xe_device_has_flat_ccs(xe);
+}
+
+/*
+ * Check whether migration must be disabled due to missing CCS BBs support in the GuC FW.
+ * @xe: the &xe_device instance.
+ *
+ * Performs a late disable of the VF migration feature in case the GuC FW cannot support it.
+ *
+ * Returns: True if VF migration with CCS BBs is supported, false otherwise.
+ */
+static bool vf_migration_ccs_bb_support_check(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_uc_fw_version guc_version;
+
+ xe_gt_sriov_vf_guc_versions(gt, NULL, &guc_version);
+ if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0)) {
+ xe_sriov_vf_migration_disable(xe,
+ "CCS migration requires GuC ABI >= 1.23 but only %u.%u found",
+ guc_version.major, guc_version.minor);
+ return false;
+ }
+
+ return true;
+}
+
static void xe_sriov_vf_ccs_fini(void *arg)
{
struct xe_sriov_vf_ccs_ctx *ctx = arg;
@@ -292,9 +333,10 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
int err;
xe_assert(xe, IS_SRIOV_VF(xe));
- xe_assert(xe, xe_sriov_vf_migration_supported(xe));
- if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))
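+	/* Nothing to do if migration is off or CCS copy BBs are not needed or not supported */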
+ if (!xe_sriov_vf_migration_supported(xe) ||
+ !vf_migration_ccs_bb_needed(xe) ||
+ !vf_migration_ccs_bb_support_check(xe))
return 0;
for_each_ccs_rw_ctx(ctx_id) {
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 6a0fd0f5463e..d5f72d667817 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -34,10 +34,10 @@ struct xe_device_vf {
/** @migration: VF Migration state data */
struct {
/**
- * @migration.enabled: flag indicating if migration support
- * was enabled or not due to missing prerequisites
+ * @migration.disabled: flag indicating if migration support
+ * was turned off due to missing prerequisites
*/
- bool enabled;
+ bool disabled;
} migration;
/** @ccs: VF CCS state data */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 129e7818565c..13af589715a7 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -633,7 +633,7 @@ err_out:
/*
* XXX: We can't derive the GT here (or anywhere in this function, but
- * compute always uses the primary GT so accumlate stats on the likely
+ * compute always uses the primary GT so accumulate stats on the likely
* GT of the fault.
*/
if (gt)
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h
index 554634dfd4e2..05614915463a 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval.h
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.h
@@ -33,7 +33,7 @@ void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
* xe_tlb_inval_fence_wait() - TLB invalidation fence wait
* @fence: TLB invalidation fence to wait on
*
- * Wait on a TLB invalidiation fence until it signals, non interruptable
+ * Wait on a TLB invalidation fence until it signals, non-interruptible
*/
static inline void
xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index e368b2a36bac..1bddecfb723a 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -106,7 +106,7 @@ static u64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
stolen_size = tile_size - mgr->stolen_base;
- xe_assert(xe, stolen_size > wopcm_size);
+ xe_assert(xe, stolen_size >= wopcm_size);
stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
index 1144f9232ebb..a71e14818ec2 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
@@ -10,7 +10,7 @@
#include <drm/ttm/ttm_device.h>
/**
- * struct xe_ttm_vram_mgr - XE TTM VRAM manager
+ * struct xe_ttm_vram_mgr - Xe TTM VRAM manager
*
* Manages placement of TTM resource in VRAM.
*/
@@ -32,7 +32,7 @@ struct xe_ttm_vram_mgr {
};
/**
- * struct xe_ttm_vram_mgr_resource - XE TTM VRAM resource
+ * struct xe_ttm_vram_mgr_resource - Xe TTM VRAM resource
*/
struct xe_ttm_vram_mgr_resource {
/** @base: Base TTM resource */
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 77a1dcf8b4ed..2ebe8c9db6ce 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -62,7 +62,7 @@ enum xe_uc_fw_type {
};
/**
- * struct xe_uc_fw_version - Version for XE micro controller firmware
+ * struct xe_uc_fw_version - Version for Xe micro controller firmware
*/
struct xe_uc_fw_version {
/** @branch: branch version of the FW (not always available) */
@@ -84,7 +84,7 @@ enum xe_uc_fw_version_types {
};
/**
- * struct xe_uc_fw - XE micro controller firmware
+ * struct xe_uc_fw - Xe micro controller firmware
*/
struct xe_uc_fw {
/** @type: type uC firmware */
@@ -112,7 +112,7 @@ struct xe_uc_fw {
/** @size: size of uC firmware including css header */
size_t size;
- /** @bo: XE BO for uC firmware */
+ /** @bo: Xe BO for uC firmware */
struct xe_bo *bo;
/** @has_gsc_headers: whether the FW image starts with GSC headers */
diff --git a/drivers/gpu/drm/xe/xe_uc_types.h b/drivers/gpu/drm/xe/xe_uc_types.h
index 9924e4484866..1708379dc834 100644
--- a/drivers/gpu/drm/xe/xe_uc_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_types.h
@@ -12,7 +12,7 @@
#include "xe_wopcm_types.h"
/**
- * struct xe_uc - XE micro controllers
+ * struct xe_uc - Xe micro controllers
*/
struct xe_uc {
/** @guc: Graphics micro controller */
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
index fec331d791e7..1ef181c90434 100644
--- a/drivers/gpu/drm/xe/xe_validation.h
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -108,7 +108,7 @@ struct xe_val_flags {
* @request_exclusive: Whether to lock exclusively (write mode) the next time
* the domain lock is locked.
* @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
- * @nr: The drm_exec nr parameter used for drm_exec (re-)initializaiton.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
*/
struct xe_validation_ctx {
struct drm_exec *exec;
@@ -137,7 +137,7 @@ bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret);
* @_ret: The current error value possibly holding -ENOMEM
*
* Use this in way similar to drm_exec_retry_on_contention().
- * If @_ret contains -ENOMEM the tranaction is restarted once in a way that
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
* blocks other transactions and allows exhaustive eviction. If the transaction
* was already restarted once, just return the -ENOMEM. May also set
* _ret to -EINTR if not retrying and waits are interruptible.
@@ -180,7 +180,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
* @_val: The xe_validation_device.
* @_exec: The struct drm_exec object
* @_flags: Flags for the xe_validation_ctx initialization.
- * @_ret: Return in / out parameter. May be set by this macro. Typicall 0 when called.
+ * @_ret: Return in / out parameter. May be set by this macro. Typically 0 when called.
*
* This macro will initiate a drm_exec transaction with additional support for
* exhaustive eviction.
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 10d77666a425..00f3520dec38 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -824,7 +824,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
*
* (re)bind SVM range setting up GPU page tables for the range.
*
- * Return: dma fence for rebind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
@@ -907,7 +907,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
*
* Unbind SVM range removing the GPU page tables for the range.
*
- * Return: dma fence for unbind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
@@ -1291,7 +1291,7 @@ static u16 pde_pat_index(struct xe_bo *bo)
* selection of options. The user PAT index is only for encoding leaf
* nodes, where we have use of more bits to do the encoding. The
* non-leaf nodes are instead under driver control so the chosen index
- * here should be distict from the user PAT index. Also the
+ * here should be distinct from the user PAT index. Also the
* corresponding coherency of the PAT index should be tied to the
* allocation type of the page table (or at least we should pick
* something which is always safe).
@@ -4172,7 +4172,7 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
/**
* xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
- * @xe: Pointer to the XE device structure
+ * @xe: Pointer to the Xe device structure
* @vma: Pointer to the virtual memory area (VMA) structure
* @is_atomic: In pagefault path and atomic operation
*
@@ -4319,7 +4319,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
} else if (__op->op == DRM_GPUVA_OP_MAP) {
vma = op->map.vma;
- /* In case of madvise call, MAP will always be follwed by REMAP.
+ /* In case of madvise call, MAP will always be followed by REMAP.
* Therefore temp_attr will always have sane values, making it safe to
* copy them to new vma.
*/
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 1030ce214032..02e5288373c9 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -7,7 +7,7 @@
#define _XE_VM_DOC_H_
/**
- * DOC: XE VM (user address space)
+ * DOC: Xe VM (user address space)
*
* VM creation
* ===========
@@ -202,13 +202,13 @@
* User pointers are user allocated memory (malloc'd, mmap'd, etc..) for which the
* user wants to create a GPU mapping. Typically in other DRM drivers a dummy BO
* was created and then a binding was created. We bypass creating a dummy BO in
- * XE and simply create a binding directly from the userptr.
+ * Xe and simply create a binding directly from the userptr.
*
* Invalidation
* ------------
*
* Since this is core kernel managed memory the kernel can move this memory
- * whenever it wants. We register an invalidation MMU notifier to alert XE when
+ * whenever it wants. We register an invalidation MMU notifier to alert Xe when
* a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
* idle to ensure no faults. This is done by waiting on all of the VM's dma-resv slots.
@@ -419,7 +419,7 @@
* =======
*
* VM locking protects all of the core data paths (bind operations, execs,
- * evictions, and compute mode rebind worker) in XE.
+ * evictions, and compute mode rebind worker) in Xe.
*
* Locks
* -----
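The userptr invalidation flow described in the xe_vm_doc.h hunk above (an MMU notifier that blocks until all GPU users of the range are idle by waiting on the VM's dma-resv slots) follows the generic kernel pattern sketched below. This is not xe's actual notifier, just an illustration; struct example_userptr and its fields are hypothetical.

struct example_userptr {
        struct mmu_interval_notifier notifier;
        struct dma_resv *resv;  /* the VM's reservation object */
};

static bool example_userptr_invalidate(struct mmu_interval_notifier *mni,
                                       const struct mmu_notifier_range *range,
                                       unsigned long cur_seq)
{
        struct example_userptr *up = container_of(mni, struct example_userptr, notifier);

        /* Mark the mapping stale so the next bind re-pins the pages. */
        mmu_interval_set_seq(mni, cur_seq);

        /* Block until every pending GPU user of the range has completed. */
        dma_resv_wait_timeout(up->resv, DMA_RESV_USAGE_BOOKKEEP, false,
                              MAX_SCHEDULE_TIMEOUT);

        return true;
}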
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index d6e2a0fdd4b3..830ed7b05c27 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -52,7 +52,7 @@ struct xe_vm_pgtable_update_op;
* struct xe_vma_mem_attr - memory attributes associated with vma
*/
struct xe_vma_mem_attr {
- /** @preferred_loc: perferred memory_location */
+ /** @preferred_loc: preferred memory_location */
struct {
/** @preferred_loc.migration_policy: Pages migration policy */
u32 migration_policy;
@@ -338,7 +338,7 @@ struct xe_vm {
u64 tlb_flush_seqno;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
- /** @xef: XE file handle for tracking this VM's drm client */
+ /** @xef: Xe file handle for tracking this VM's drm client */
struct xe_file *xef;
};
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index b6dcd9827354..ec638b431131 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -916,6 +916,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("14024681466"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index cf318e3ddb5c..e4eebabab975 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -258,6 +258,8 @@
# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT 3 /* DP 2.1a, in units of 2 MPixels/sec */
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK (0x1f << DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT)
#define DP_DSC_RC_BUF_SIZE 0x063
@@ -1686,6 +1688,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
+#define DP_DSC_BRANCH_CAP_SIZE 3
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 5
#define DP_LTTPR_COMMON_CAP_SIZE 8
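The new DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT/_MASK definitions above describe a 5-bit DP 2.1a field in units of 2 MPixels/sec. A hedged decode sketch follows; it assumes the field shares the DP_DSC_RC_BUF_BLK_SIZE byte (DPCD 0x062), which the placement of the define suggests, and that dsc_dpcd[] is the usual DP_DSC_SUPPORT (0x060) based receiver cap block.

static int example_dsc_throughput_mode_0_delta(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
        u8 field = dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT];
        u8 delta = (field & DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >>
                   DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT;

        return delta * 2;       /* the field is documented in units of 2 MPixels/sec */
}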
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 52ce28097015..df2f24b950e4 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -211,6 +211,11 @@ u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
u8 dsc_bpc[3]);
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]);
static inline bool
drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -828,6 +833,15 @@ enum drm_dp_quirk {
* requires enabling DSC.
*/
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
+ /**
+ * @DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT:
+ *
+ * The device doesn't support DSC decompression at the maximum DSC
+ * pixel throughput and compressed bpp it indicates via its DPCD DSC
+ * capabilities. The compressed bpp must be limited when operating above a
+ * device-specific DSC pixel throughput.
+ */
+ DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT,
};
/**
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 2d2a0bd526cf..66278ffeebd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -318,6 +318,17 @@ struct drm_crtc_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @sharpness_strength:
+ *
+ * Used by userspace to set the sharpness intensity.
+ * The value ranges from 0 to 255.
+ * The default value is 0, which disables the sharpness feature.
+ * Any value greater than 0 enables sharpening with the
+ * specified strength.
+ */
+ u8 sharpness_strength;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1089,6 +1100,12 @@ struct drm_crtc {
struct drm_property *scaling_filter_property;
/**
+ * @sharpness_strength_property: property used to set
+ * the requested sharpness intensity.
+ */
+ struct drm_property *sharpness_strength_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1324,4 +1341,5 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters);
bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc);
#endif /* __DRM_CRTC_H__ */
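A hedged sketch of how a driver might adopt the new CRTC sharpness property: create it at init time and consume the 0-255 strength from the CRTC state in the atomic path. Whether the helper also attaches the property (as the scaling-filter helper does) is an assumption here; the example_ names are illustrative.

static int example_crtc_sharpness_init(struct drm_crtc *crtc)
{
        /* Assumption: like drm_crtc_create_scaling_filter_property(), this
         * both creates and attaches crtc->sharpness_strength_property. */
        return drm_crtc_create_sharpness_strength_property(crtc);
}

static void example_crtc_program_sharpness(const struct drm_crtc_state *state)
{
        if (!state->sharpness_strength)
                return;         /* 0 keeps the feature disabled */

        /* Any non-zero value (1..255) enables sharpening at that strength;
         * a real driver would program its hardware filter here. */
}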
diff --git a/include/drm/intel/display_member.h b/include/drm/intel/display_member.h
new file mode 100644
index 000000000000..0319ea560b60
--- /dev/null
+++ b/include/drm/intel/display_member.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DRM_INTEL_DISPLAY_H__
+#define __DRM_INTEL_DISPLAY_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+#include <drm/drm_device.h>
+
+struct intel_display;
+
+/*
+ * A dummy device struct to define the relative offsets of drm and display
+ * members. With the members identically placed in struct drm_i915_private and
+ * struct xe_device, this allows figuring out the struct intel_display pointer
+ * without the definition of either driver specific structure.
+ */
+struct __intel_generic_device {
+ struct drm_device drm;
+ struct intel_display *display;
+};
+
+/**
+ * INTEL_DISPLAY_MEMBER_STATIC_ASSERT() - ensure correct placing of drm and display members
+ * @type: The struct to check
+ * @drm_member: Name of the struct drm_device member
+ * @display_member: Name of the struct intel_display * member.
+ *
+ * Use this static assert macro to ensure that the struct drm_device and
+ * struct intel_display * members of struct drm_i915_private and struct
+ * xe_device are at the same relative offsets.
+ */
+#define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
+ static_assert( \
+ offsetof(struct __intel_generic_device, display) - offsetof(struct __intel_generic_device, drm) == \
+ offsetof(type, display_member) - offsetof(type, drm_member), \
+ __stringify(type) " " __stringify(drm_member) " and " __stringify(display_member) " members at invalid offsets")
+
+#endif
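A minimal usage sketch for the macro above: a hypothetical driver-private structure that places the two members at the same relative offsets as struct __intel_generic_device, so the assert compiles cleanly. struct example_driver_device is illustrative only.

struct example_driver_device {
        struct drm_device drm;
        struct intel_display *display;  /* right after drm, matching the reference layout */
        /* other driver members can follow */
};

INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct example_driver_device, drm, display);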
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
new file mode 100644
index 000000000000..26bedc360044
--- /dev/null
+++ b/include/drm/intel/display_parent_interface.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DISPLAY_PARENT_INTERFACE_H__
+#define __DISPLAY_PARENT_INTERFACE_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct ref_tracker;
+
+struct intel_display_rpm_interface {
+ struct ref_tracker *(*get)(const struct drm_device *drm);
+ struct ref_tracker *(*get_raw)(const struct drm_device *drm);
+ struct ref_tracker *(*get_if_in_use)(const struct drm_device *drm);
+ struct ref_tracker *(*get_noresume)(const struct drm_device *drm);
+
+ void (*put)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_raw)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_unchecked)(const struct drm_device *drm);
+
+ bool (*suspended)(const struct drm_device *drm);
+ void (*assert_held)(const struct drm_device *drm);
+ void (*assert_block)(const struct drm_device *drm);
+ void (*assert_unblock)(const struct drm_device *drm);
+};
+
+/**
+ * struct intel_display_parent_interface - services parent driver provides to display
+ *
+ * The parent, or core, driver provides a pointer to this structure to the display
+ * driver when calling intel_display_device_probe(). The display driver uses it
+ * to access services provided by the parent driver. The structure may contain
+ * sub-struct pointers to group function pointers by functionality.
+ *
+ * All function and sub-struct pointers must be initialized and callable unless
+ * explicitly marked as "optional" below. The display driver will only NULL
+ * check the optional pointers.
+ */
+struct intel_display_parent_interface {
+ /** @rpm: Runtime PM functions */
+ const struct intel_display_rpm_interface *rpm;
+};
+
+#endif
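A hedged sketch (illustrative, not from either driver) of how a parent driver might populate the interface above before handing it to intel_display_device_probe(); the callback bodies are placeholders, and every pointer not shown here would also need to be filled in unless marked optional.

static struct ref_tracker *example_rpm_get(const struct drm_device *drm)
{
        return NULL;    /* a real driver would take a runtime PM reference here */
}

static void example_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
{
        /* a real driver would drop the runtime PM reference here */
}

static bool example_rpm_suspended(const struct drm_device *drm)
{
        return false;   /* a real driver would report its runtime PM state */
}

static const struct intel_display_rpm_interface example_rpm = {
        .get = example_rpm_get,
        .put = example_rpm_put,
        .suspended = example_rpm_suspended,
        /* all remaining pointers must also be populated unless marked optional */
};

static const struct intel_display_parent_interface example_parent = {
        .rpm = &example_rpm,
};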
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 9f095a99d6c9..b258e79b437a 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -880,7 +880,10 @@
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0B0, ## __VA_ARGS__), \
+ MACRO__(0xB0B0, ## __VA_ARGS__)
+
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
MACRO__(0xFD80, ## __VA_ARGS__), \
MACRO__(0xFD81, ## __VA_ARGS__)
@@ -893,4 +896,8 @@
MACRO__(0xD744, ## __VA_ARGS__), \
MACRO__(0xD745, ## __VA_ARGS__)
+/* CRI */
+#define INTEL_CRI_IDS(MACRO__, ...) \
+ MACRO__(0x674C, ## __VA_ARGS__)
+
#endif /* __PCIIDS_H__ */
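The WCL and CRI devices now have their own ID macros; as with the other INTEL_*_IDS() helpers, callers expand them with a per-entry macro. A hedged sketch of such an expansion into a struct pci_device_id table follows; the entry macro and table are illustrative, not taken from either driver.

#define EXAMPLE_VGA_DEVICE(id, info) { \
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \
        .driver_data = (kernel_ulong_t)(info) }

static const struct pci_device_id example_pciids[] = {
        INTEL_WCL_IDS(EXAMPLE_VGA_DEVICE, NULL),
        INTEL_CRI_IDS(EXAMPLE_VGA_DEVICE, NULL),
        { }
};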