-rw-r--r--Documentation/gpu/amdgpu/amd-hardware-list-info.rst4
-rw-r--r--Documentation/gpu/amdgpu/apu-asic-info-table.csv34
-rw-r--r--Documentation/gpu/amdgpu/debugfs.rst4
-rw-r--r--Documentation/gpu/amdgpu/dgpu-asic-info-table.csv58
-rw-r--r--Documentation/gpu/amdgpu/display/dc-glossary.rst2
-rw-r--r--Documentation/gpu/amdgpu/display/display-contributing.rst4
-rw-r--r--Documentation/gpu/amdgpu/display/programming-model-dcn.rst2
-rw-r--r--Documentation/gpu/amdgpu/driver-core.rst2
-rw-r--r--Documentation/gpu/amdgpu/process-isolation.rst2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c260
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c333
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c180
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c74
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c73
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c2
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c56
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c311
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c50
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c188
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c376
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h193
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h30
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h81
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h30
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c79
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c153
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c58
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h108
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h74
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c305
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c151
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c30
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h26
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c6
279 files changed, 4879 insertions, 2428 deletions
diff --git a/Documentation/gpu/amdgpu/amd-hardware-list-info.rst b/Documentation/gpu/amdgpu/amd-hardware-list-info.rst
index 1786544fe7c1..e72f4ff770c4 100644
--- a/Documentation/gpu/amdgpu/amd-hardware-list-info.rst
+++ b/Documentation/gpu/amdgpu/amd-hardware-list-info.rst
@@ -10,7 +10,7 @@ Accelerated Processing Units (APU) Info
.. csv-table::
:header-rows: 1
- :widths: 3, 2, 2, 1, 1, 1, 1
+ :widths: 3, 2, 2, 1, 1, 1, 1, 1
:file: ./apu-asic-info-table.csv
Discrete GPU Info
@@ -18,6 +18,6 @@ Discrete GPU Info
.. csv-table::
:header-rows: 1
- :widths: 3, 2, 2, 1, 1, 1
+ :widths: 3, 2, 2, 1, 1, 1, 1, 1
:file: ./dgpu-asic-info-table.csv
diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
index 1d50b539677f..b479c5629146 100644
--- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv
+++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
@@ -1,17 +1,17 @@
-Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version
-Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3, n/a
-Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0, 10.0.0
-Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3
-Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1
-SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
-Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
-Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
-Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
-Ryzen 9000 series (AM5), Granite Ridge, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
-Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
-Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
-Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
-Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
-Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0
-Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4
-Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1
+Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version, MP1 version
+Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3, n/a, 8
+Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0, 10.0.0, 10.0.0
+Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3, 12.0.1
+Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1, 10.0.1
+SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0, 11.5.0
+Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1, 12.0.1
+Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3, 13.0.3
+Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5, 13.0.5
+Ryzen 9000 series (AM5), Granite Ridge, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5, 13.0.5
+Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5, 13.0.5
+Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8, 13.0.8
+Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11
+Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11
+Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0, 14.0.0
+Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4, 14.0.4
+Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1, 14.0.1
diff --git a/Documentation/gpu/amdgpu/debugfs.rst b/Documentation/gpu/amdgpu/debugfs.rst
index 5150d0a95658..151d8bfc79e2 100644
--- a/Documentation/gpu/amdgpu/debugfs.rst
+++ b/Documentation/gpu/amdgpu/debugfs.rst
@@ -94,7 +94,7 @@ amdgpu_error_<name>
-------------------
Provides an interface to set an error code on the dma fences associated with
-ring <name>. The error code specified is propogated to all fences associated
+ring <name>. The error code specified is propagated to all fences associated
with the ring. Use this to inject a fence error into a ring.
amdgpu_pm_info
@@ -165,7 +165,7 @@ GTT memory.
amdgpu_regs_*
-------------
-Provides direct access to various register aperatures on the GPU. Used
+Provides direct access to various register apertures on the GPU. Used
by tools like UMR to access GPU registers.
amdgpu_regs2
diff --git a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv
index d2f10ee69dfc..bfd44c6e052a 100644
--- a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv
+++ b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv
@@ -1,28 +1,30 @@
-Product Name, Code Reference, DCN/DCE version, GC version, VCN version, SDMA version
-AMD Radeon (TM) HD 8500M/ 8600M /M200 /M320 /M330 /M335 Series, HAINAN, --, 6, --, --
-AMD Radeon HD 7800 /7900 /FireGL Series, TAHITI, DCE 6, 6, VCE 1 / UVD 3, --
-AMD Radeon R7 (TM|HD) M265 /M370 /8500M /8600 /8700 /8700M, OLAND, DCE 6, 6, VCE 1 / UVD 3, --
-AMD Radeon (TM) (HD|R7) 7800 /7970 /8800 /8970 /370/ Series, PITCAIRN, DCE 6, 6, VCE 1 / UVD 3, --
-AMD Radeon (TM|R7|R9|HD) E8860 /M360 /7700 /7800 /8800 /9000(M) /W4100 Series, VERDE, DCE 6, 6, VCE 1 / UVD 3, --
-AMD Radeon HD M280X /M380 /7700 /8950 /W5100, BONAIRE, DCE 8, 7, VCE 2 / UVD 4.2, 1
-AMD Radeon (R9|TM) 200 /390 /W8100 /W9100 Series, HAWAII, DCE 8, 7, VCE 2 / UVD 4.2, 1
-AMD Radeon (TM) R(5|7) M315 /M340 /M360, TOPAZ, *, 8, --, 2
-AMD Radeon (TM) R9 200 /380 /W7100 /S7150 /M390 /M395 Series, TONGA, DCE 10, 8, VCE 3 / UVD 5, 3
-AMD Radeon (FirePro) (TM) R9 Fury Series, FIJI, DCE 10, 8, VCE 3 / UVD 6, 3
-Radeon RX 470 /480 /570 /580 /590 Series - AMD Radeon (TM) (Pro WX) 5100 /E9390 /E9560 /E9565 /V7350 /7100 /P30PH, POLARIS10, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3
-Radeon (TM) (RX|Pro WX) E9260 /460 /V5300X /550 /560(X) Series, POLARIS11, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3
-Radeon (RX/Pro) 500 /540(X) /550 /640 /WX2100 /WX3100 /WX200 Series, POLARIS12, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3
-Radeon (RX|TM) (PRO|WX) Vega /MI25 /V320 /V340L /8200 /9100 /SSG MxGPU, VEGA10, DCE 12, 9.0.1, VCE 4.0.0 / UVD 7.0.0, 4.0.0
-AMD Radeon (Pro) VII /MI50 /MI60, VEGA20, DCE 12, 9.4.0, VCE 4.1.0 / UVD 7.2.0, 4.2.0
-MI100, ARCTURUS, *, 9.4.1, VCN 2.5.0, 4.2.2
-MI200 Series, ALDEBARAN, *, 9.4.2, VCN 2.6.0, 4.4.0
-MI300 Series, AQUA_VANJARAM, *, 9.4.3, VCN 4.0.3, 4.4.2
-AMD Radeon (RX|Pro) 5600(M|XT) /5700 (M|XT|XTB) /W5700, NAVI10, DCN 2.0.0, 10.1.10, VCN 2.0.0, 5.0.0
-AMD Radeon (Pro) 5300 /5500XTB/5500(XT|M) /W5500M /W5500, NAVI14, DCN 2.0.0, 10.1.1, VCN 2.0.2, 5.0.2
-AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN 3.0.0, 5.2.0
-AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0.0, 5.2.2
-AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4
-AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5
-AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0
-AMD Radeon RX 7800 XT, , DCN 3.2.0, 11.0.3, VCN 4.0.0, 6.0.3
-AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2
+Product Name, Code Reference, DCN/DCE version, GC version, VCN version, SDMA version, MP0 version, MP1 version
+AMD Radeon (TM) HD 8500M/ 8600M /M200 /M320 /M330 /M335 Series, HAINAN, --, 6, --, --, --, 6
+AMD Radeon HD 7800 /7900 /FireGL Series, TAHITI, DCE 6, 6, VCE 1 / UVD 3, --, --, 6
+AMD Radeon R7 (TM|HD) M265 /M370 /8500M /8600 /8700 /8700M, OLAND, DCE 6, 6, -- / UVD 3, --, --, 6
+AMD Radeon (TM) (HD|R7) 7800 /7970 /8800 /8970 /370/ Series, PITCAIRN, DCE 6, 6, VCE 1 / UVD 3, --, --, 6
+AMD Radeon (TM|R7|R9|HD) E8860 /M360 /7700 /7800 /8800 /9000(M) /W4100 Series, VERDE, DCE 6, 6, VCE 1 / UVD 3, --, --, 6
+AMD Radeon HD M280X /M380 /7700 /8950 /W5100, BONAIRE, DCE 8, 7, VCE 2 / UVD 4.2, 1, --, 7
+AMD Radeon (R9|TM) 200 /390 /W8100 /W9100 Series, HAWAII, DCE 8, 7, VCE 2 / UVD 4.2, 1, --, 7
+AMD Radeon (TM) R(5|7) M315 /M340 /M360, TOPAZ, *, 8, --, 2, n/a, 7
+AMD Radeon (TM) R9 200 /380 /W7100 /S7150 /M390 /M395 Series, TONGA, DCE 10, 8, VCE 3 / UVD 5, 3, n/a, 7
+AMD Radeon (FirePro) (TM) R9 Fury Series, FIJI, DCE 10, 8, VCE 3 / UVD 6, 3, n/a, 7
+Radeon RX 470 /480 /570 /580 /590 Series - AMD Radeon (TM) (Pro WX) 5100 /E9390 /E9560 /E9565 /V7350 /7100 /P30PH, POLARIS10, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3, n/a, 7
+Radeon (TM) (RX|Pro WX) E9260 /460 /V5300X /550 /560(X) Series, POLARIS11, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3, n/a, 7
+Radeon (RX/Pro) 500 /540(X) /550 /640 /WX2100 /WX3100 /WX200 Series, POLARIS12, DCE 11.2, 8, VCE 3.4 / UVD 6.3, 3, n/a, 7
+Radeon (RX|TM) (PRO|WX) Vega /MI25 /V320 /V340L /8200 /9100 /SSG MxGPU, VEGA10, DCE 12, 9.0.1, VCE 4.0.0 / UVD 7.0.0, 4.0.0, 9.0.0, 9.0.0
+AMD Radeon (Pro) VII /MI50 /MI60, VEGA20, DCE 12, 9.4.0, VCE 4.1.0 / UVD 7.2.0, 4.2.0, 11.0.2, 11.0.2
+MI100, ARCTURUS, *, 9.4.1, VCN 2.5.0, 4.2.2, 11.0.4, 11.0.2
+MI200 Series, ALDEBARAN, *, 9.4.2, VCN 2.6.0, 4.4.0, 13.0.2, 13.0.2
+MI300 Series, AQUA_VANJARAM, *, 9.4.3, VCN 4.0.3, 4.4.2, 13.0.6, 13.0.6
+AMD Radeon (RX|Pro) 5600(M|XT) /5700 (M|XT|XTB) /W5700, NAVI10, DCN 2.0.0, 10.1.10, VCN 2.0.0, 5.0.0, 11.0.0, 11.0.0
+AMD Radeon (Pro) 5300 /5500XTB/5500(XT|M) /W5500M /W5500, NAVI14, DCN 2.0.0, 10.1.1, VCN 2.0.2, 5.0.2, 11.0.5, 11.0.5
+AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN 3.0.0, 5.2.0, 11.0.7, 11.0.7
+AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0.0, 5.2.2, 11.0.11, 11.0.11
+AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4, 11.0.12, 11.0.12
+AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5, 11.0.13, 11.0.13
+AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0, 13.0.0, 13.0.0
+AMD Radeon RX 7800 XT, , DCN 3.2.0, 11.0.3, VCN 4.0.0, 6.0.3, 13.0.10, 13.0.10
+AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2, 13.0.7, 13.0.7
+AMD Radeon RX 9070 (XT), , DCN 4.0.1, 12.0.1, VCN 5.0.0, 7.0.1, 14.0.3, 14.0.3
+AMD Radeon RX 9060 XT, , DCN 4.0.1, 12.0.0, VCN 5.0.0, 7.0.0, 14.0.2, 14.0.2
diff --git a/Documentation/gpu/amdgpu/display/dc-glossary.rst b/Documentation/gpu/amdgpu/display/dc-glossary.rst
index 7dc034e9e586..cbe737d1fcea 100644
--- a/Documentation/gpu/amdgpu/display/dc-glossary.rst
+++ b/Documentation/gpu/amdgpu/display/dc-glossary.rst
@@ -5,7 +5,7 @@ DC Glossary
On this page, we try to keep track of acronyms related to the display
component. If you do not find what you are looking for, look at the
'Documentation/gpu/amdgpu/amdgpu-glossary.rst'; if you cannot find it anywhere,
-consider asking in the amdgfx and update this page.
+consider asking on the amd-gfx mailing list and update this page.
.. glossary::
diff --git a/Documentation/gpu/amdgpu/display/display-contributing.rst b/Documentation/gpu/amdgpu/display/display-contributing.rst
index 36f3077eee00..2f741c52dce5 100644
--- a/Documentation/gpu/amdgpu/display/display-contributing.rst
+++ b/Documentation/gpu/amdgpu/display/display-contributing.rst
@@ -9,8 +9,8 @@ contribution to the display code, and for that, we say thank you :)
This page summarizes some of the issues you can help with; keep in mind that
this is a static page, and it is always a good idea to try to reach developers
-in the amdgfx or some of the maintainers. Finally, this page follows the DRM
-way of creating a TODO list; for more information, check
+on the amd-gfx mailing list or some of the maintainers. Finally, this page
+follows the DRM way of creating a TODO list; for more information, check
'Documentation/gpu/todo.rst'.
Gitlab issues
diff --git a/Documentation/gpu/amdgpu/display/programming-model-dcn.rst b/Documentation/gpu/amdgpu/display/programming-model-dcn.rst
index c1b48d49fb0b..bc7de97a746f 100644
--- a/Documentation/gpu/amdgpu/display/programming-model-dcn.rst
+++ b/Documentation/gpu/amdgpu/display/programming-model-dcn.rst
@@ -100,7 +100,7 @@ represents the connected display.
For historical reasons, we used the name `dc_link`, which gives the
wrong impression that this abstraction only deals with physical connections
that the developer can easily manipulate. However, this also covers
- conections like eDP or cases where the output is connected to other devices.
+ connections like eDP or cases where the output is connected to other devices.
There are two structs that are not represented in the diagram since they were
elaborated in the DCN overview page (check the DCN block diagram :ref:`Display
diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst
index 81256318e93c..bd4be32f2725 100644
--- a/Documentation/gpu/amdgpu/driver-core.rst
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -65,7 +65,7 @@ SDMA (System DMA)
GC (Graphics and Compute)
This is the graphics and compute engine, i.e., the block that
- encompasses the 3D pipeline and and shader blocks. This is by far the
+ encompasses the 3D pipeline and shader blocks. This is by far the
largest block on the GPU. The 3D pipeline has tons of sub-blocks. In
addition to that, it also contains the CP microcontrollers (ME, PFP, CE,
MEC) and the RLC microcontroller. It's exposed to userspace for user mode
diff --git a/Documentation/gpu/amdgpu/process-isolation.rst b/Documentation/gpu/amdgpu/process-isolation.rst
index 6b6d70e357a7..25b06ffefc33 100644
--- a/Documentation/gpu/amdgpu/process-isolation.rst
+++ b/Documentation/gpu/amdgpu/process-isolation.rst
@@ -26,7 +26,7 @@ Example of enabling enforce isolation on a GPU with multiple partitions:
$ cat /sys/class/drm/card0/device/enforce_isolation
1 0 1 0
-The output indicates that enforce isolation is enabled on zeroth and second parition and disabled on first and fourth parition.
+The output indicates that enforce isolation is enabled on zeroth and second partition and disabled on first and third partition.
For devices with a single partition or those that do not support partitions, there will be only one element:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef3af170dda4..01f53700694b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -819,6 +819,20 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -896,6 +910,9 @@ struct amdgpu_pcie_reset_ctx {
bool in_link_reset;
bool occurs_dpc;
bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
};
/*
@@ -929,12 +946,6 @@ enum amdgpu_enforce_isolation_mode {
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
};
-
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
-
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1302,6 +1313,7 @@ struct amdgpu_device {
struct list_head userq_mgr_list;
struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct amdgpu_uid *uid_info;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1785,4 +1797,9 @@ static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index cbc40cad581b..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -130,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
struct aca_banks *banks, struct ras_query_context *qctx)
@@ -168,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
bank.smu_err_type = type;
+ /*
+ * Poison being consumed when injecting a UE while running background workloads,
+ * which are unexpected.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
@@ -178,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -229,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 260165bbe373..d478acb4568a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -494,7 +494,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -504,7 +505,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -961,7 +962,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -2969,9 +2970,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 5e375e9c4f5d..a381de8648e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1195,29 +1195,60 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * Returns the maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * Validates the given display mode on DVI and HDMI connectors,
+ * including analog signals on DVI-I.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 25252231a68a..6c266f18c598 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -206,6 +206,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
{
struct cper_sec_desc *section_desc;
struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
@@ -224,6 +225,9 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
/* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
@@ -234,8 +238,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d3f220be2ef9..2ac9729e4c86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -396,7 +396,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -468,7 +468,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
@@ -902,7 +902,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page *),
GFP_KERNEL);
if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
+ drm_err(adev_to_drm(p->adev), "kvmalloc_array failure\n");
r = -ENOMEM;
goto out_free_user_pages;
}
@@ -983,7 +983,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -1061,13 +1061,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1238,7 +1238,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1451,7 +1451,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1466,9 +1466,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 01d234cf8156..b9e2f0293d61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -178,6 +178,8 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -2445,6 +2447,33 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2473,8 +2502,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- dev_info(adev->dev, "detected ip block number %d <%s>\n",
- adev->num_ip_blocks, ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2674,6 +2708,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2857,6 +2909,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.supported)
amdgpu_xgmi_early_init(adev);
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -3389,7 +3443,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3648,6 +3702,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -4992,7 +5047,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -5701,7 +5757,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU link reset\n");
- if (!adev->pcie_reset_ctx.occurs_dpc)
+ if (!amdgpu_reset_in_dpc(adev))
ret = amdgpu_dpm_link_reset(adev);
if (ret)
@@ -5830,6 +5886,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -6136,12 +6193,11 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
struct list_head *device_list,
struct amdgpu_hive_info *hive)
{
struct amdgpu_device *tmp_adev = NULL;
- int r;
/*
* Build list of devices to reset.
@@ -6153,7 +6209,7 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
- if (adev->pcie_reset_ctx.occurs_dpc)
+ if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
@@ -6161,14 +6217,6 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
} else {
list_add_tail(&adev->reset_list, device_list);
}
-
- if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list);
- if (r)
- return r;
- }
-
- return 0;
}
static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
@@ -6237,9 +6285,8 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- (!adev->pcie_reset_ctx.occurs_dpc) &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -6267,11 +6314,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list, reset_list) {
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6449,8 +6492,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
+ dev_info(adev->dev, "GPU %s begin!. Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
@@ -6461,8 +6505,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
- if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
- goto end_reset;
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
@@ -6890,11 +6939,6 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (!amdgpu_dpm_is_link_reset_supported(adev)) {
- dev_warn(adev->dev, "No support for XGMI hive yet...\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
adev->pci_channel_state = state;
switch (state) {
@@ -6904,10 +6948,23 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_frozen:
/* Fatal error, prepare for slot reset */
dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices should be able to support FW based
+ * link reset on other devices, if not return.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set dpc status only if device is part of hive
+ * Non-hive devices should be able to recover after
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- if (hive)
mutex_lock(&hive->hive_lock);
- adev->pcie_reset_ctx.occurs_dpc = true;
+ }
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
@@ -6966,22 +7023,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_device *tmp_adev;
struct amdgpu_hive_info *hive;
struct list_head device_list;
- int r = 0, i;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
u32 memsize;
-
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
+ u16 status;
dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- /* wait for asic to come out of reset */
- msleep(700);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -7076,7 +7145,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
- adev->pcie_reset_ctx.occurs_dpc = false;
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -7084,6 +7152,58 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
}
}
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(adev->pdev);
+ int r;
+
+ if (parent->vendor != PCI_VENDOR_ID_ATI)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(parent);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent);
+
+ parent = pci_upstream_bridge(parent);
+ r = pci_save_state(parent);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent);
+
+ adev->pcie_reset_ctx.swus = parent;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
+ }
+
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
+}
+
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -7108,6 +7228,8 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -7494,3 +7616,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
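
As a usage illustration (not part of the patch), a caller that records a UID discovered for a block instance and sanity-checks the readback might look like the sketch below; the AMDGPU_UID_TYPE_GC enumerator and the adev->uid_info handle are assumed names for illustration only:

	/* Illustrative sketch: AMDGPU_UID_TYPE_GC and adev->uid_info are
	 * assumed names, not defined by this patch.
	 */
	static void example_record_gc_uid(struct amdgpu_device *adev,
					  uint8_t inst, uint64_t uid)
	{
		amdgpu_device_set_uid(adev->uid_info, AMDGPU_UID_TYPE_GC, inst, uid);

		if (amdgpu_device_get_uid(adev->uid_info, AMDGPU_UID_TYPE_GC, inst) != uid)
			dev_warn(adev->dev, "UID readback mismatch on instance %u\n", inst);
	}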
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..17f754d1135d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -886,7 +886,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -2597,6 +2597,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
/* don't allow going deep first time followed by s2idle the next time */
if (adev->last_suspend_state != PM_SUSPEND_ON &&
adev->last_suspend_state != pm_suspend_target_state) {
@@ -2604,11 +2605,14 @@ static int amdgpu_pmops_suspend(struct device *dev)
pm_suspend_target_state);
return -EINVAL;
}
+#endif
return 0;
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/* cache the state last used for suspend */
adev->last_suspend_state = pm_suspend_target_state;
+#endif
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2933,11 +2937,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
- if (fpriv) {
+ if (fpriv && drm_dev_enter(dev, &idx)) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
}
return drm_release(inode, filp);
@@ -2964,15 +2971,15 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -3117,7 +3124,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
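
The switch to pm_sleep_ptr()/pm_ptr() means the sleep callbacks and the dev_pm_ops pointer collapse to NULL when CONFIG_PM_SLEEP/CONFIG_PM are disabled, while the callbacks stay syntactically referenced so dead-code elimination can drop them without unused-function warnings. A simplified sketch of what the <linux/pm.h> helpers evaluate to (not their exact definitions):

	/* Simplified view of the helpers used above; see include/linux/pm.h. */
	#define EXAMPLE_PTR_IF(cond, ptr)	((cond) ? (ptr) : NULL)
	#define example_pm_ptr(p)		EXAMPLE_PTR_IF(IS_ENABLED(CONFIG_PM), (p))
	#define example_pm_sleep_ptr(p)		EXAMPLE_PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (p))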
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 9e7506965cab..fd8cca241da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -120,7 +120,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
if (!am_fence)
return -ENOMEM;
- am_fence->context = 0;
} else {
am_fence = af;
}
@@ -738,7 +737,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
}
-/**
+/*
* Kernel queue reset handling
*
* The driver can reset individual queues for most engines, but those queues
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6626a6e64ff5..d5e685c5e28b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -790,36 +790,6 @@ error:
return fence;
}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
-}
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -840,7 +810,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -944,10 +913,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -959,10 +927,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b51e8f95ee86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,7 +63,6 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 397c6ccdb903..55097ca10738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -154,15 +154,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -356,9 +356,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
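
Since amdgpu_gem_va_map_flags() is removed and mappings now carry the GEM UAPI flags, each GMC backend translates them to hardware PTE bits inside the new ->get_vm_pte() hook. A minimal sketch of such an implementation, reusing only the flag names visible in this patch (the real gmc_v*.c versions also pick an MTYPE from AMDGPU_VM_MTYPE_MASK and apply per-VM policy):

	static void example_gmc_get_vm_pte(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm,
					   struct amdgpu_bo *bo,
					   uint32_t vm_flags,
					   uint64_t *pte_flags)
	{
		*pte_flags &= ~AMDGPU_PTE_EXECUTABLE;
		if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
			*pte_flags |= AMDGPU_PTE_EXECUTABLE;
		if (vm_flags & AMDGPU_VM_PAGE_READABLE)
			*pte_flags |= AMDGPU_PTE_READABLE;
		if (vm_flags & AMDGPU_VM_PAGE_WRITEABLE)
			*pte_flags |= AMDGPU_PTE_WRITEABLE;
		if (vm_flags & AMDGPU_VM_PAGE_PRT)
			*pte_flags |= AMDGPU_PTE_PRT_FLAG(adev);
		if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
			*pte_flags |= AMDGPU_PTE_NOALLOC;
		/* MTYPE handling (vm_flags & AMDGPU_VM_MTYPE_MASK) omitted here. */
	}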
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 82d58ac7afb0..f0d7e2487237 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -368,7 +370,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))
ring->sched.ready = true;
else
ring->sched.ready = false;
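
The sched mask is laid out one bit per ring, linearised across instances, so ring j of instance i is controlled by bit (i * num_jpeg_rings + j). A tiny illustrative helper (not part of the patch) that computes that bit:

	/* Illustrative only: which sched_mask bit controls ring 'ring' of instance 'inst'. */
	static inline u64 example_jpeg_sched_mask_bit(unsigned int inst,
						      unsigned int ring,
						      unsigned int rings_per_inst)
	{
		return BIT_ULL(inst * rings_per_inst + ring);
	}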
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c0d2c195fe2e..489a4a0f0610 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -280,6 +280,13 @@ struct mes_reset_queue_input {
bool is_kq;
};
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -367,6 +374,9 @@ struct amdgpu_mes_funcs {
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
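
A rough sketch of how a TLB-flush path could drive the new hook; the wrapper name, the zeroed xcc_id/hub_id values and the adev->mes.funcs dispatch shown here are assumptions for illustration, not something this patch adds:

	static int example_mes_invalidate_tlbs_pasid(struct amdgpu_device *adev,
						     uint16_t pasid, uint8_t flush_type)
	{
		struct mes_inv_tlbs_pasid_input input = {
			.xcc_id = 0,
			.pasid = pasid,
			.hub_id = 0,		/* assumed: GFXHUB */
			.flush_type = flush_type,
		};

		if (!adev->mes.funcs->invalidate_tlbs_pasid)
			return -EOPNOTSUPP;

		return adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
	}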
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c316920f3450..87523fcd4386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 23484317a5fa..1d6e1d5de8fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -506,8 +506,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
- AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf);
@@ -666,6 +665,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "FB_FW_RESERV_ADDR";
case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -877,9 +880,7 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 540817e296da..7fe5b1940df8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -122,12 +122,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -136,10 +139,14 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
@@ -169,18 +176,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -207,6 +212,49 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* The input address to be checked was allocated by the
+ * calling process itself, so the calling process must be
+ * excluded from the ownership check.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -297,6 +345,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -311,6 +361,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -500,6 +559,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -513,22 +575,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
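
For reference, the new op 4 can be driven from user space by writing "check_address <addr> <flags>" to the RAS control node, with flags 0x0 (BYPASS_ALLOCATED_ADDRESS) or 0x1 (BYPASS_INITIALIZATION_ADDRESS). A minimal user-space sketch; the debugfs path (typically /sys/kernel/debug/dri/<minor>/ras/ras_ctrl) and the address used here are illustrative assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path and address are examples only. */
		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

		if (fd < 0) {
			perror("open ras_ctrl");
			return 1;
		}
		/* flags 0x0 == BYPASS_ALLOCATED_ADDRESS in the patch above */
		if (dprintf(fd, "check_address 0x12345000 0x0\n") < 0)
			perror("write");
		close(fd);
		return 0;
	}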
@@ -2566,18 +2622,26 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
goto out;
}
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
+ *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
if (!*bps) {
ret = -ENOMEM;
goto out;
}
for (; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
(*bps)[i] = (struct ras_badpage){
.bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
+
+ if (amdgpu_ras_check_critical_address(adev,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
@@ -2586,7 +2650,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
- *count = data->count;
+ *count = con->bad_page_num;
out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2722,7 +2786,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2814,8 +2878,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
@@ -2828,6 +2895,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
return 0;
@@ -2974,7 +3042,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
@@ -2988,8 +3056,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
@@ -3012,7 +3082,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -3033,27 +3103,26 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
+ if (unit_num > 0) {
/*old asics only save pa to eeprom like before*/
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num], save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num +
+ &data->bps[data->count_saved +
i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
@@ -3062,6 +3131,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -3116,17 +3186,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
}
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3134,18 +3204,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
@@ -3153,11 +3229,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3241,7 +3317,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3261,7 +3337,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3287,7 +3363,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3299,13 +3374,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3316,49 +3387,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
+ consumption_q_count = ecc_log->consumption_q_count;
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ if (de_queried_count && consumption_q_count)
+ break;
- if (timeout) {
- if (!--timeout)
- break;
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3394,6 +3455,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure the poison creation handler has completed first
+ * so that RMA is set if the bad page count exceeds the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3403,8 +3470,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3434,6 +3499,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3446,7 +3512,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3463,6 +3530,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3487,9 +3555,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3570,8 +3641,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3589,6 +3662,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3661,6 +3735,8 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+
return 0;
}
/* recovery end */
@@ -4087,6 +4163,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4366,6 +4448,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -5274,6 +5359,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5310,3 +5398,80 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo had been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 927d6bff734a..6cf0dfd38be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -492,8 +492,15 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
};
struct amdgpu_ras {
@@ -515,6 +522,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -557,6 +565,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -570,6 +579,17 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
};
struct ras_fs_data {
@@ -608,6 +628,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -973,6 +994,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9bda9ad13f88..3eb3fb55ccb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -743,8 +743,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
else
control->ras_num_mca_recs += num;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -766,6 +765,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
if ((amdgpu_bad_page_threshold != -1) &&
(amdgpu_bad_page_threshold != -2)) {
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
@@ -774,9 +777,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.health_percent = 0;
}
ras->is_rma = true;
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -1457,8 +1461,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
dev_dbg(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..2f92b3be40f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -160,4 +160,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index d45ebfb642ca..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
- va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- va_flags);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index c92b8794aa73..2e039fb778ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -252,6 +252,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index c3ace8030530..f72de06a4ac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -471,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
+ r = -EINVAL;
goto unlock;
}
@@ -567,7 +568,6 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.queue_size ||
args->in.rptr_va ||
args->in.wptr_va ||
- args->in.wptr_va ||
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f1f67521c29c..ad415203d245 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -184,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
dev_info(adev->dev,
- "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -285,6 +286,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
@@ -352,8 +357,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
if (adev->vcn.harvest_config & (1 << i))
return 0;
- cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
-
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
@@ -405,6 +408,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -412,7 +463,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -438,16 +488,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -457,30 +502,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -508,6 +534,7 @@ pg_lock:
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -1527,3 +1554,86 @@ int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
return amdgpu_vcn_reset_engine(adev, ring->me);
}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
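
A sketch of how a VCN version file is expected to opt in: register its register list once from sw_init and point the IP funcs at the shared dump/print helpers. The list contents and the SOC15_REG_ENTRY_STR usage below follow the existing ip_dump pattern in other IP blocks and are illustrative, not part of this patch:

	/* Illustrative wiring only. */
	static const struct amdgpu_hwip_reg_entry example_vcn_reg_list[] = {
		SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),	/* must stay first */
		SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	};

	static int example_vcn_sw_init(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;

		/* ... regular VCN sw_init work ... */
		return amdgpu_vcn_reg_dump_init(adev, example_vcn_reg_list,
						ARRAY_SIZE(example_vcn_reg_list));
	}

	/* and in the version's amd_ip_funcs:
	 *	.dump_ip_state	= amdgpu_vcn_dump_ip_state,
	 *	.print_ip_state	= amdgpu_vcn_print_ip_state,
	 */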
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0bc0a94d7cf0..6d9acd36041d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -237,6 +237,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -362,6 +364,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -557,4 +561,11 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a1..58accf2259b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -267,7 +267,8 @@ struct amdgpu_virt {
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
- struct work_struct bad_pages_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0b87798daebd..bd12d8ff15a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1339,13 +1339,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
* but in case of something, we filter the flags in first place
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1486,7 +1487,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1765,7 +1766,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1825,7 +1826,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1884,7 +1885,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -2741,7 +2742,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fd086efd8457..3b4fa3246675 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -538,11 +538,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..474bfe36c0c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -379,9 +379,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +436,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +448,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +873,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +962,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 78f9e86ccc09..e69db0a93378 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -396,6 +396,35 @@ out:
return ret;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
+{
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+}
+
static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
@@ -568,6 +597,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
remaining_size -= size;
}
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
unsigned long dcc_start;
@@ -645,6 +678,10 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
@@ -933,6 +970,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
if (!adev->gmc.is_app_apu) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 2c88d5fd87da..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -72,4 +86,7 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
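The new amdgpu_vram_mgr_query_address_block_info() helper walks the manager's allocation list and reports the owning task of the buddy block covering a given VRAM address. A hypothetical debug/RAS caller could use it roughly as follows (bad_addr is an assumed input, error paths elided):

	struct amdgpu_vram_block_info info;

	/* illustrative only: look up which process owns the block containing bad_addr */
	if (!amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr, bad_addr, &info))
		dev_info(adev->dev, "0x%llx owned by pid %d (%s), block 0x%llx size 0x%llx\n",
			 bad_addr, info.task.pid, info.task.comm, info.start, info.size);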
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index c417f8689220..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -406,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 70a0f8400b57..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -112,6 +112,7 @@ struct amdgpu_xcp {
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bf7c22f81cda..ba73518f5cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1462,17 +1462,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 47e05783c4a0..b01d88d078fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1511,17 +1511,12 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 276c025c4c03..81760a26f2ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1451,17 +1451,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e62ccf9eb73d..19a265bd4d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1443,17 +1443,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7bd506f06eb1..264183ab24ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7668,19 +7668,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c01c241a1b06..c85de8c8f6f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1612,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
- adev->gfx.me_fw_version >= 2390 &&
- adev->gfx.pfp_fw_version >= 2530 &&
- adev->gfx.mec_fw_version >= 2600 &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -4129,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4281,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 3e138527d534..fd44d5503e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3026,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3175,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 20b30f4b3c7d..a6ff9a137a83 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2650,6 +2650,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -4172,19 +4175,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c48cd47b531f..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 51babf5c78c8..8ba66d4dfe86 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2461,19 +2461,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 7923f491cf73..7031dd8c3c5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -466,24 +466,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -508,21 +490,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -563,7 +563,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f15d691e9a20..93d2b0bbe641 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -430,24 +430,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -472,21 +454,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -527,7 +527,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index de763105fdfd..9ba055ddc00f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -336,6 +336,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x81) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+ /* MES will invalidate all gc_hubs for the device from the master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+ /* Only the mm_hub needs to be invalidated now; gfx12 only supports one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -453,20 +469,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -490,19 +492,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
- *flags |= AMDGPU_PTE_PRT_GFX12;
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
*flags |= AMDGPU_PTE_IS_PTE;
@@ -543,7 +561,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8030fcd64210..f6ad7911f1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a8d5795084fc..93d7ccb7d013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index b45fa0cea9d2..c5e2a2c41e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c4d69cf4e06c..8404695eb13f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1073,27 +1073,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1123,6 +1102,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1236,25 +1216,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_RW:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
- flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1391,7 +1389,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b86288a69e7b..a78144773fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -444,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 54523dc1f702..47bde62b3909 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -196,6 +196,14 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
if (r)
return r;
@@ -307,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -1016,8 +1024,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban
/* reference to smu driver if header file */
static int jpeg_v5_0_1_err_codes[] = {
- 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
- 24, 25, 26, 27, 28, 29, 30, 31
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1058,6 +1067,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -1065,11 +1079,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
goto late_fini;
}
- r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
- &jpeg_v5_0_1_aca_info, NULL);
- if (r)
- goto late_fini;
-
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 28eb846280dd..3f6a828cad8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -641,8 +641,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
- return -EINVAL;
+ dev_warn_once(mes->adev->dev,
+ "MES FW version must be larger than 0x63 to support limit single process feature.\n");
+ return 0;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 6b222630f3fa..cd5c966cee95 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -108,6 +108,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -879,6 +880,46 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+ * MES doesn't support invalidating gc_hub on a slave XCC individually;
+ * the master XCC will invalidate all gc_hubs for the partition
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+ /* convert amdgpu_mes_hub_id to the hub_id expected by MES */
+ mes_inv_tlbs.invalidate_tlbs.hub_id = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (mes_inv_tlbs.invalidate_tlbs.hub_id < 0)
+ return -EINVAL;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -888,6 +929,7 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..457972aa5632 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -202,9 +202,6 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
- case IDH_REQ_RAS_BAD_PAGES:
- event = IDH_RAS_BAD_PAGES_READY;
- break;
default:
break;
}
@@ -359,14 +356,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -397,10 +412,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +505,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index d5002ff931d8..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 6cc05d36e359..64b240b51f1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -149,12 +149,12 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
int ret;
int retry_loop;
- for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for bootloader to signify that it is
ready by having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
+ 0x80000000, 0x8000FFFF, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -397,18 +397,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
msleep(500);
- offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
-
- ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
- 0);
-
- if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
- return -EINVAL;
- }
-
- DRM_INFO("psp mode1 reset succeed \n");
-
return 0;
}
@@ -665,7 +653,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
.load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
- .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw,
+ .wait_for_bootloader = psp_v11_0_wait_for_bootloader
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index e6d8eddda2bf..db6e41967f12 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1377,7 +1377,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
- if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 1):
@@ -1385,11 +1385,11 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 2):
- if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
- if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9e74c9822e62..9785fada4fa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request full access for sriov before
* set_ip_blocks. */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e590cbdd8de9..8dc32787d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block that decodes the consumption is the SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c74947705d77..1e89ba153d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 68b4371df0f1..b115137ab2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,8 +136,6 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
@@ -232,14 +230,9 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -276,8 +269,6 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
r = amdgpu_vcn_sw_fini(adev, 0);
- kfree(adev->vcn.ip_dump);
-
return r;
}
@@ -865,6 +856,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
vcn_v2_0_enable_static_power_gating(vinst);
@@ -948,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -2095,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2168,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bc30a5326866..904b94bc8693 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -116,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -149,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -167,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -177,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -218,6 +194,7 @@ pg_lock:
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
@@ -297,8 +274,6 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
@@ -423,14 +398,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -477,8 +447,6 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -1035,6 +1003,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1125,8 +1094,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -2127,66 +2101,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2200,8 +2114,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2217,8 +2131,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
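Note: the per-version IP-dump plumbing removed above is replaced by common helpers. amdgpu_vcn_reg_dump_init() receives the version's register list, and the generic amdgpu_vcn_dump_ip_state()/amdgpu_vcn_print_ip_state() reuse the same flat layout the removed functions used: reg_count entries per instance, slot 0 holding UVD_POWER_STATUS, the remaining registers read only while the instance is powered. Below is a minimal sketch of what the init helper plausibly does; the reg_list/reg_count field names are guesses, not taken from this patch.

/* Illustrative sketch only; field names are assumptions. */
int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg, u32 count)
{
	/* one count-sized slice per VCN instance, as in the removed code */
	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
				    sizeof(uint32_t), GFP_KERNEL);
	if (!adev->vcn.ip_dump)
		return -ENOMEM;

	/* remember the list so the generic dump/print callbacks can walk it */
	adev->vcn.reg_list = reg;
	adev->vcn.reg_count = count;

	return 0;
}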
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 4b8f4407047f..95173156f956 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -175,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
/*
@@ -304,14 +302,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -361,7 +354,6 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
return 0;
}
@@ -1042,6 +1034,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1134,8 +1127,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -2342,67 +2340,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2416,8 +2353,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1924e075b66f..d0d27790b73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -255,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -315,8 +308,6 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -1012,6 +1003,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1094,8 +1086,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1624,7 +1621,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
vcn_v4_0_stop_dpg_mode(vinst);
- r = 0;
goto done;
}
@@ -2246,67 +2242,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2320,8 +2255,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2a3663b551af..7b93a275ec4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
+}
+
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -213,10 +224,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -231,20 +238,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -287,8 +285,6 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -391,7 +387,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -851,7 +847,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -944,8 +940,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1872,71 +1873,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1947,8 +1887,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index caf2d95a85d4..75c884a8f556 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -147,9 +147,6 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -233,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
+
+ return r;
}
/**
@@ -284,8 +275,6 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -926,6 +915,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1006,8 +996,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1704,67 +1699,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1778,8 +1712,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 07a6e9582880..455f829b8bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-}
-
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -201,7 +186,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -251,8 +238,6 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -713,6 +698,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -766,8 +752,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1428,67 +1419,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1502,8 +1432,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index cdefd7fcb0da..4b01e35ad7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -40,6 +40,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
@@ -163,7 +197,17 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- vcn_v5_0_0_alloc_ip_dump(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -209,8 +253,6 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -284,7 +326,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -605,7 +647,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -666,8 +708,13 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* resetting ring, fw should not check RB ring */
fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
@@ -1475,8 +1522,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1557,7 +1604,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank
/* reference to smu driver if header file */
static int vcn_v5_0_1_err_codes[] = {
- 14, 15, /* VCN */
+ 14, 15, 47, /* VCN [D|V|S] */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1603,6 +1650,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
goto late_fini;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 828a9ceef1e7..43115a374469 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1070,7 +1070,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+	/* Skip a special case that allocates VRAM without a VA;
+	 * the VA will be the invalid value 0.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -3252,8 +3257,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3266,8 +3273,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 79251f22b702..5d7eb0234002 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -260,20 +260,7 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
-{
- unsigned long upages = 0;
- unsigned long i;
-
- for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
- }
- return upages;
-}
-
-static int
+static long
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
@@ -282,7 +269,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ long mpages;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
@@ -296,6 +283,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
+ mpages = 0;
for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
@@ -356,13 +344,14 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- for (i = 0; i < npages && mpages; i++) {
+ while (i-- && mpages) {
if (!dst[i])
continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
mpages--;
}
+ mpages = r;
}
#ifdef DEBUG_FORCE_MIXED_DOMAINS
@@ -380,7 +369,7 @@ out_free_vram_pages:
}
#endif
- return r;
+ return mpages;
}
static long
@@ -395,7 +384,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
- unsigned long mpages = 0;
+ long mpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -441,15 +430,17 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
else
pr_debug("0x%lx pages collected\n", cpages);
- r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ if (mpages >= 0)
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
+ else
+ r = mpages;
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -459,14 +450,13 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger, r);
out:
- if (!r && mpages) {
+ if (!r && mpages > 0) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
-
- return mpages;
}
- return r;
+
+ return r ? r : mpages;
}
/**
@@ -577,7 +567,7 @@ static void svm_migrate_page_free(struct page *page)
}
}
-static int
+static long
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t npages)
@@ -586,6 +576,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t *src;
dma_addr_t *dst;
struct page *dpage;
+ long mpages;
uint64_t i = 0, j;
uint64_t addr;
int r = 0;
@@ -598,6 +589,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
src = (uint64_t *)(scratch + npages);
dst = scratch;
+ mpages = 0;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
@@ -646,6 +638,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
+ mpages++;
j++;
}
@@ -655,13 +648,17 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
out_oom:
if (r) {
pr_debug("failed %d copy to ram\n", r);
- while (i--) {
+ while (i-- && mpages) {
+ if (!migrate->dst[i])
+ continue;
svm_migrate_put_sys_page(dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
+ mpages = r;
}
- return r;
+ return mpages;
}
/**
@@ -688,9 +685,8 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
unsigned long cpages = 0;
- unsigned long mpages = 0;
+ long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -736,7 +732,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -745,13 +740,15 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
else
pr_debug("0x%lx pages collected\n", cpages);
- r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ if (mpages >= 0)
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
+ else
+ r = mpages;
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -764,8 +761,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages > 0) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -848,6 +844,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 67694bcd9464..d01ef5ac0766 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP waits for SDMA preemption. One XCC
+ * can be associated with 2 SDMA engines. queue_preemption_timeout_ms is the time
+ * the driver waits for the CP to return the UNMAP_QUEUE fence. Thus the math is
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp workload
+ * The formula here reserves 10% of the total timeout for the CP workload.
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
#define KFD_MAX_SDMA_QUEUES 128
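A worked example of the new macro, assuming the default queue_preemption_timeout_ms of 9000 ms (the value is a module parameter, so this is only the common case):

/*   cp workload          = 9000 / 10   =  900 ms
 *   sdma share           = 9000 - 900  = 8100 ms
 *   KFD_UNMAP_LATENCY_MS = 8100 >> 1   = 4050 ms per SDMA engine,
 * close to the old hard-coded 4000 ms, but now scaling with the parameter.
 */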
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a0f22ea6d15a..521c14c7a789 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1292,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1305,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
@@ -1412,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1714,6 +1713,29 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permissions. If provided with PROT_NONE,
+ * unmap the memory. If it's not already mapped, this is a no-op.
+ * If PROT_WRITE is provided without READ, warn first, then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returns non-zero, we'll bail on the next for loop
+ * iteration, so just leave r and continue
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner, NULL,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 4ec73f33535e..82dbd68d8c99 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -530,6 +530,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index 8bc36f04b1b7..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -46,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -73,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -89,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cd0e2976e268..242f98564261 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -82,6 +80,7 @@
#include <linux/component.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -102,15 +101,6 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -578,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only when FreeSync is actively variable, or when both PSR and Replay are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -4773,8 +4817,8 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
uint32_t *user_brightness)
{
u32 brightness = scale_input_to_fw(min, max, *user_brightness);
- u8 prev_signal = 0, prev_lum = 0;
- int i = 0;
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
return;
@@ -4782,32 +4826,44 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
if (!caps->data_points)
return;
- /* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
- i = caps->data_points/2;
- do {
- u8 signal = caps->luminance_data[i].input_signal;
- u8 lum = caps->luminance_data[i].luminance;
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
- /*
- * brightness == signal: luminance is percent numerator
- * brightness < signal: interpolate between previous and current luminance numerator
- * brightness > signal: find next data point
- */
- if (brightness > signal) {
- prev_signal = signal;
- prev_lum = lum;
- i++;
- continue;
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
}
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- *user_brightness = scale_fw_to_input(min, max,
- DIV_ROUND_CLOSEST(lum * brightness, 101));
- return;
- } while (i < caps->data_points);
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* verify bound */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
}
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
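The rewritten convert_custom_brightness() above replaces the linear scan with a binary search over the firmware luminance table and then linearly interpolates between the two bracketing points. A standalone sketch of the same lookup, assuming a monotonically increasing table and adding a defensive clamp for a brightness below the first data point (table values are hypothetical):

struct lum_point {
	unsigned char input_signal;
	unsigned char luminance;
};

static unsigned int interp_luminance(const struct lum_point *pts, int n,
				      unsigned int brightness)
{
	int left = 0, right = n - 1;

	while (left <= right) {
		int mid = left + (right - left) / 2;

		if (pts[mid].input_signal == brightness)
			return pts[mid].luminance;	/* exact match */
		if (pts[mid].input_signal < brightness)
			left = mid + 1;
		else
			right = mid - 1;
	}

	if (right < 0)
		return pts[0].luminance;	/* below the first point */
	if (left >= n)
		left = n - 1;			/* above the last point */
	if (left == right)
		return pts[left].luminance;

	/* linear interpolation between the two bracketing points */
	return pts[right].luminance +
	       ((pts[left].luminance - pts[right].luminance) *
		(brightness - pts[right].input_signal)) /
	       (pts[left].input_signal - pts[right].input_signal);
}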
@@ -4926,10 +4982,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -6390,13 +6444,15 @@ static void fill_stream_properties_from_drm_display_mode(
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -7792,6 +7848,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
@@ -7803,6 +7862,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -8378,8 +8445,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
- (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ if (encoder)
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
@@ -8498,6 +8564,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -10086,69 +10164,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -10162,7 +10211,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -10170,29 +10219,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -10236,7 +10269,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10244,6 +10277,45 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failures here imply a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10346,6 +10418,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10397,6 +10471,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
uint8_t cnt;
+
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
if (acrtc->dm_irq_params.window_param[cnt].enable) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b937da0a4e4a..ce74125c713e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -152,6 +153,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Work data for offloading work from the vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
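The struct above carries everything a deferred handler needs to apply the timing adjustment outside of interrupt context. A minimal sketch of how such a work item is typically consumed; the handler name and the ownership of the work item and @adjust are assumptions here, since the actual handler is not shown in this hunk:

static void vupdate_offload_work_func(struct work_struct *work)
{
	struct vupdate_offload_work *offload =
		container_of(work, struct vupdate_offload_work, work);

	/* apply the timing adjustment in process context */
	dc_stream_adjust_vmin_vmax(offload->adev->dm.dc, offload->stream,
				   offload->adjust);

	/* assumes the producer allocated both the work item and @adjust */
	kfree(offload->adjust);
	kfree(offload);
}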
#define MAX_LUMINANCE_DATA_POINTS 99
/**
@@ -753,6 +768,9 @@ struct amdgpu_dm_connector {
uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..c7387af725d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -27,7 +28,6 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 033bd817d871..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 3da056c8d20b..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 010172f930ae..45feb404b097 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+	 * The IPS and self-refresh features can cause vblank counter resets
+	 * between vblank disable and enable.
+	 * This can leave the system stuck waiting on the vblank counter.
+	 * Call drm_crtc_vblank_restore() to estimate the missed vblanks from
+	 * timestamps and update the DRM vblank counter.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index b726bcd18e29..f263e1a4537e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -3106,6 +3107,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / Stop capture Replay residency
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3324,7 +3354,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3502,6 +3533,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b1d1897f5eaf..437174d4fed5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9e3e51a2dc49..fe100e4c9801 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index b61e210f6246..a1c722112c22 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index ba17c23b2706..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6c9de834455b..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7187d5aedf0a..6a817508c826 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
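retrieve_branch_specific_data() folds the three-byte branch OUI from the upstream device's descriptor into a single 24-bit value, with OUI byte 0 as the most significant byte. A small sketch of the packing, using the branch ID the bandwidth check below compares against as an illustrative value:

static u32 pack_branch_oui(const u8 oui[3])
{
	/* same packing as above: oui[0] is the most significant byte */
	return (oui[0] << 16) + (oui[1] << 8) + oui[2];
}

/* pack_branch_oui((u8 []){ 0x90, 0xCC, 0x24 }) == 0x90CC24, i.e. the
 * DP_BRANCH_DEVICE_ID_90CC24 value used in dm_dp_mst_is_port_support_mode(). */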
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connectror to drm and
* framebuffer lists
@@ -1762,14 +1794,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
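The single 16-byte read works because all three registers of interest sit within 16 bytes of DP_LINK_BW_SET; with the standard DPCD addresses from drm_dp.h the indices resolve as sketched below (illustrative, the assert merely restates the assumption the code relies on):

/* DP_LINK_BW_SET                  0x100 -> data[0]
 * DP_LANE_COUNT_SET               0x101 -> data[1]
 * DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 -> data[8]
 */
static_assert(DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET < 16);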
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
@@ -1866,8 +1904,10 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_link_bw = aconnector->mst_local_bw;
}
- if (end_link_bw > 0 && stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..65f76a7d00db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..e5771f490f2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index ff7b867ae98b..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index e2366321a3c1..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..82ea3fe5e764 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..132de4071efd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 95f890fda8aa..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3c9ecea7eebc..dc943abd6dba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -37,6 +37,7 @@ DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
DC_LIBS += dml2
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index d897f8a30ede..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 452206b5095e..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
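The (size_t) casts in the vector helpers above are overflow hardening: struct_size and the element counts are 32-bit, so their product would be computed in 32 bits and could wrap before the implicit conversion to the size_t parameter of memmove(). A minimal standalone sketch of the failure mode and the fix (only relevant when size_t is wider than 32 bits):

#include <stddef.h>
#include <stdint.h>

size_t bytes_to_move(uint32_t count, uint32_t struct_size)
{
	/* Wrong: "count * struct_size" multiplies in 32 bits and may wrap
	 * before being widened to size_t.
	 * Right: widen one operand first so the product is computed in
	 * size_t width. */
	return (size_t)count * struct_size;
}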
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 67f08495b7e6..154fd2c18e88 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
return object_id;
}
- if (tbl->ucNumberOfObjects <= i) {
- dm_error("Can't find connector id %d in connector table of size %d.\n",
- i, tbl->ucNumberOfObjects);
+ if (tbl->ucNumberOfObjects <= i)
return object_id;
- }
id = le16_to_cpu(tbl->asObjects[i].usObjectID);
object_id = object_id_from_bios_object_id(id);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bcae0643e61..58e88778da7f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
allocation.sPCLKInput.usFbDiv =
cpu_to_le16((uint16_t)bp_params->feedback_divider);
allocation.sPCLKInput.ucFracFbDiv =
- (uint8_t)bp_params->fractional_feedback_divider;
+ (uint8_t)(bp_params->fractional_feedback_divider / 100000);
allocation.sPCLKInput.ucPostDiv =
(uint8_t)bp_params->pixel_clock_post_divider;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index f5ad0a177038..dbd6ef1b60a0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
@@ -391,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -405,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index f8409453434c..13cf415e38e5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
+ struct dc *dc = context->clk_mgr->ctx->dc;
int j;
int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
for (j = 0; j < context->stream_count; j++) {
int k;
@@ -164,6 +170,23 @@ void dce110_fill_display_configs(
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
+
+ /* Find first CRTC index and calculate its line time.
+ * This is necessary for DPM on SI GPUs.
+ */
+ if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index = cfg->pipe_idx;
+ pp_display_cfg->line_time_in_us =
+ timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+ }
+
+ if (!num_cfgs) {
+ pp_display_cfg->crtc_index = 0;
+ pp_display_cfg->line_time_in_us = 0;
}
pp_display_cfg->display_count = num_cfgs;
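The line-time expression above converts the pixel clock, stored in units of 100 Hz, into microseconds per scanline. A sketch with illustrative CEA 1080p60 numbers:

static unsigned int line_time_us(unsigned int h_total, unsigned int pix_clk_100hz)
{
	/* h_total / (pix_clk_100hz * 100) seconds
	 *   = h_total * 10000 / pix_clk_100hz microseconds */
	return h_total * 10000 / pix_clk_100hz;
}

/* Example: h_total = 2200, 148.5 MHz pixel clock -> pix_clk_100hz = 1485000,
 * so line_time_us(2200, 1485000) == 14 (us per line, truncated). */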
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
- }
-
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index 0267644717b2..a39641a0ff09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- int dprefclk_wdivider;
- int dp_ref_clk_khz;
- int target_div;
+ struct dc_context *ctx = clk_mgr_base->ctx;
+ int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+ else
+ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..8376e2b0e73d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index b59703467128..47ff4c965d76 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -1404,11 +1404,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 9ab0ee20ca6f..261b7d43e91d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -84,6 +84,7 @@
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -217,11 +218,24 @@ static bool create_links(
connectors_num,
num_virtual_links);
- // condition loop on link_count to allow skipping invalid indices
+ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+ * but it doesn't say which indices are valid, and not every index has an actual connector.
+ * So, if we don't find a connector on an index, that is not an error.
+ *
+ * - There is no guarantee that the first N indices will be valid
+	 * - VBIOS may report a higher number of valid indices than there are actual connectors
+ * - Some VBIOS have valid configurations for more connectors than there actually are
+ * on the card. This may be because the manufacturer used the same VBIOS for different
+ * variants of the same card.
+ */
for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+ continue;
+
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
link_init_params.ctx = dc->ctx;
@@ -446,7 +460,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
- if (dc->optimized_required || dc->wm_optimized_required) {
+ if ((dc->optimized_required || dc->wm_optimized_required) &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true;
return false;
}
@@ -934,7 +950,9 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
@@ -1138,6 +1156,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -2398,6 +2419,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+	 * If the transition is not already seamless, make it seamless by inserting an intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -3377,7 +3410,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -5089,129 +5122,6 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5469,12 +5379,10 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
@@ -6450,3 +6358,21 @@ bool dc_can_clear_cursor_limit(struct dc *dc)
return false;
}
+
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
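A hypothetical caller sketch for the new helper, e.g. from an underflow interrupt path. The field names follow the dc_underflow_debug_data struct added to dc.h below; the logging macro and the caller itself are illustrative, not part of the patch:

static void report_underflow(struct dc *dc, int otg_inst)
{
	struct dc_underflow_debug_data data = {0};

	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &data);

	DC_LOG_DEBUG("OTG%u underflow=%u frame=%u position=%ux%u\n",
		     data.otg_inst, data.otg_underflow, data.otg_frame_count,
		     data.h_position, data.v_position);
}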
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ec4e80e5b6eb..d82b1cb467f4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -1177,6 +1177,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 130455f2802a..b7a5de4ecb61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -520,3 +520,10 @@ enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, cons
return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4d6181e7c612..d712548b1927 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -165,7 +165,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4d6bc9fd4faa..9ac2d41f8fca 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -316,6 +316,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +334,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 59c07756130d..b41e66c31e6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.340"
+#define DC_VER "3.2.348"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -234,6 +234,7 @@ struct lut3d_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTs; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
@@ -694,6 +695,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+	/* Stutter efficiencies are technically not clock values, but they are
+	 * stored here so that they are part of the update_clocks call, similar
+	 * to num_ways. Efficiencies are stored as a percentage (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -839,7 +849,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_bw_allocation_mode:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -1072,6 +1083,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1145,6 +1157,11 @@ struct dc_debug_options {
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
uint32_t acpi_transition_bitmasks[MAX_PIPES];
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
};
@@ -1305,6 +1322,32 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+7 : 9 * 8 + 7 * 8 = 72 + 56 = 128; 128 % 128 = 0
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: 17 * 8 + 15 * 8 = 136 + 120 = 256; 256 % 128 = 0
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //17+47: 17 * 8 + 47 * 8 = 136 + 376 = 512; 512 % 128 = 0
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+// This structure maps directly to the layout in which the LUT hardware reads it from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
+
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
@@ -1733,7 +1776,7 @@ struct dc {
struct dml2_configuration_options dml2_options;
struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1786,6 +1829,23 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ uint32_t otg_inst;
+ uint32_t otg_underflow;
+ uint32_t h_position;
+ uint32_t v_position;
+ uint32_t otg_frame_count;
+ struct dc_underflow_per_hubp_debug_data {
+ uint32_t hubp_underflow;
+ uint32_t hubp_in_blank;
+ uint32_t hubp_readline;
+ uint32_t det_config_error;
+ } hubps[MAX_PIPES];
+ uint32_t curr_det_sizes[MAX_PIPES];
+ uint32_t target_det_sizes[MAX_PIPES];
+ uint32_t compbuf_config_error;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1804,8 +1864,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -2447,6 +2505,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*/
enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
+/*
+ * Get if ALPM is supported by the link
+ */
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
+
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -2674,4 +2738,17 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
bool dc_is_cursor_limit_pending(struct dc *dc);
bool dc_can_clear_cursor_limit(struct dc *dc);
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW states when underflow happens,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
#endif /* DC_INTERFACE_H_ */
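
One detail in the dc.h hunks above that is easy to get wrong: when a new flag such as enable_bw_allocation_mode is added to union dpia_debug_options, the reserved width has to shrink by the same amount so the bitfield view still covers exactly the 32-bit raw word. A reduced, self-contained sketch of that accounting (bits 0-3 are collapsed into a placeholder; only bits 4-7 are named as in the hunk):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union dpia_debug_options_sketch {
	struct {
		uint32_t low_bits:4;                        /* bits 0-3, names omitted here */
		uint32_t enable_force_tbt3_work_around:1;   /* bit 4 */
		uint32_t disable_usb4_pm_support:1;         /* bit 5 */
		uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
		uint32_t enable_bw_allocation_mode:1;       /* bit 7 */
		uint32_t reserved:24;                       /* 4+1+1+1+1+24 == 32 */
	} bits;
	uint32_t raw;
};

int main(void)
{
	union dpia_debug_options_sketch opt = { .raw = 0 };

	static_assert(sizeof(opt) == sizeof(uint32_t), "bitfields must stay within one dword");

	opt.bits.enable_bw_allocation_mode = 1;
	/* 0x00000080 on the usual LSB-first little-endian layout */
	printf("raw = 0x%08x\n", opt.raw);
	return 0;
}
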
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f5ef1a07078e..53a088ebddef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -2010,11 +2010,12 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
return result;
}
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
- uint32_t count)
+ uint32_t count
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2042,9 +2043,54 @@ bool dmub_lsdma_send_linear_copy_packet(
return result;
}
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
bool dmub_lsdma_send_tiled_to_tiled_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
- struct lsdma_send_tiled_to_tiled_copy_command_params params)
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2066,8 +2112,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.src_y = params.src_y;
lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
- lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1
- lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
@@ -2078,8 +2124,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.tmz = params.tmz;
lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
- lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1
- lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
lsdma_data->u.tiled_copy_data.data_format = params.data_format;
lsdma_data->u.tiled_copy_data.max_com = params.max_com;
lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
@@ -2097,7 +2143,8 @@ bool dmub_lsdma_send_pio_copy_command(
uint64_t src_addr,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t overlap_disable)
+ uint32_t overlap_disable
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2130,7 +2177,8 @@ bool dmub_lsdma_send_pio_constfill_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t data)
+ uint32_t data
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 8ea320f21269..7ef93444ef3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -211,11 +211,45 @@ void dc_dmub_srv_fams2_passthrough_flip(
int surface_count);
bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
uint32_t count);
+
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
bool dmub_lsdma_send_pio_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
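
The new dmub_lsdma_send_linear_sub_window_copy_command() takes its parameters by value in the packed struct declared above, with 64-bit GPU addresses split into lo/hi words and coordinates/pitches squeezed into 16-bit fields. A hedged sketch of how a caller might fill it, using a trimmed copy of the struct and made-up addresses (treating rect_x/rect_y as the sub-window width/height is an assumption):

#include <stdint.h>
#include <stdio.h>

struct sub_window_copy_sketch {          /* reduced copy of the fields used below */
	uint32_t src_lo, src_hi;
	uint32_t dst_lo, dst_hi;
	uint32_t src_x : 16, src_y : 16;
	uint32_t dst_x : 16, dst_y : 16;
	uint32_t rect_x : 16, rect_y : 16;
	uint32_t src_pitch : 16, dst_pitch : 16;
};

int main(void)
{
	uint64_t src = 0x0000008012345000ull; /* hypothetical GPU addresses */
	uint64_t dst = 0x0000008098765000ull;
	struct sub_window_copy_sketch p = {
		.src_lo = (uint32_t)src, .src_hi = (uint32_t)(src >> 32),
		.dst_lo = (uint32_t)dst, .dst_hi = (uint32_t)(dst >> 32),
		.src_x = 0,    .src_y = 0,
		.dst_x = 64,   .dst_y = 32,
		.rect_x = 256, .rect_y = 128,
		.src_pitch = 1024, .dst_pitch = 1024,
	};

	printf("src=0x%08x%08x rect=%ux%u\n", p.src_hi, p.src_lo, p.rect_x, p.rect_y);
	return 0;
}
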
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 5ce1be362534..3a3ec38cdf8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1021,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates {
union dp_alpm_lttpr_cap {
struct {
uint8_t AUX_LESS_ALPM_SUPPORTED :1;
- uint8_t RESERVED :7;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
@@ -1119,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1347,7 +1349,9 @@ union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
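
All of the dc_dp_types.h hunks above follow the same pattern: a raw DPCD byte read from the sink is dropped into .raw and individual capability bits are read back through .bits, so adding a capability only means carving a bit out of RESERVED. A standalone sketch using the edp_alpm_caps layout from the hunk (the 0x0D value is made up):

#include <stdint.h>
#include <stdio.h>

union edp_alpm_caps_sketch {
	struct {
		uint8_t AUX_WAKE_ALPM_CAP :1;
		uint8_t PM_STATE_2A_SUPPORT :1;
		uint8_t AUX_LESS_ALPM_CAP :1;
		uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
		uint8_t RESERVED :4;
	} bits;
	uint8_t raw;
};

int main(void)
{
	union edp_alpm_caps_sketch caps = { .raw = 0x0D }; /* pretend DPCD read */

	printf("aux-wake=%u pm-2a=%u aux-less=%u ml-phy-status=%u\n",
	       caps.bits.AUX_WAKE_ALPM_CAP,
	       caps.bits.PM_STATE_2A_SUPPORT,
	       caps.bits.AUX_LESS_ALPM_CAP,
	       caps.bits.AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED);
	return 0;
}
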
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 7217de258851..51e41aed7316 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -732,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 375ca2f13b7a..619834a328a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -563,6 +563,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -571,6 +577,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -616,6 +628,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -728,6 +741,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -1137,6 +1151,10 @@ struct replay_config {
bool low_rr_supported;
/* Replay Video Conferencing Optimization Enabled */
bool replay_video_conferencing_optimization_enabled;
+ /* Replay ALPM mode */
+ enum dc_alpm_mode alpm_mode;
+ /* OS requested that Replay use full-screen updates only */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 668ee2d405fd..0b8ed9b94d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -619,7 +619,7 @@ void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
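
Context for the changed condition: dto_integer is the whole quotient of pixclk over the DTO modulo rate and dto_phase_hz is the remainder, so a phase of exactly 0 is legitimate whenever the pixel clock divides evenly; only both being 0 indicates a bogus pixel rate. A plain-C rework of that split with made-up rates:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dto_modulo_hz = 600000000ull;        /* hypothetical modulo rate      */
	uint64_t pixclks[] = { 594000000ull,          /* non-zero remainder            */
			       1200000000ull,         /* exact multiple: phase is 0    */
			       0ull };                /* the only genuinely bogus case */

	for (int i = 0; i < 3; i++) {
		uint64_t dto_integer  = pixclks[i] / dto_modulo_hz;
		uint64_t dto_phase_hz = pixclks[i] - dto_integer * dto_modulo_hz;
		int bogus = (dto_phase_hz == 0 && dto_integer == 0);

		printf("pixclk=%llu -> integer=%llu phase=%llu bogus=%d\n",
		       (unsigned long long)pixclks[i],
		       (unsigned long long)dto_integer,
		       (unsigned long long)dto_phase_hz, bogus);
	}
	return 0;
}
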
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 0421b267a0b5..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -591,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
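
Both I2C hunks above build the on-the-wire address byte: the 7-bit device address occupies bits [7:1] and bit 0 is the R/W flag, 0 for write and 1 for read; the rewrite just spells out the old '| !payload->write' explicitly. Minimal illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t i2c_addr_byte(uint8_t address7, bool write)
{
	return (uint8_t)((address7 << 1) | (write ? 0 : 1));
}

int main(void)
{
	uint8_t addr = 0x50; /* e.g. a DDC/EDID EEPROM */

	printf("write: 0x%02x, read: 0x%02x\n",
	       i2c_addr_byte(addr, true), i2c_addr_byte(addr, false));
	/* write: 0xa0, read: 0xa1 */
	return 0;
}
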
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4a9d07c31bc5..0c50fe266c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ff3b8244ba3d..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index e7a318e26d38..65b979617b0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,8 +3,8 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link.h"
#include "dc_dmub_srv.h"
-#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -44,45 +44,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
- struct dc_link *link)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
- struct pipe_ctx *pipe_ctx = NULL;
- struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
- uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable) {
+ if (enable)
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
- pipe_ctx = &res_ctx->pipe_ctx[i];
- //TODO: refactor for multi edp support
- break;
- }
- }
-
- if (!pipe_ctx)
- return;
-
- cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- }
- } else
+ else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -174,17 +150,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- if (pipe_ctx->stream_res.hpo_dp_stream_enc)
- copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- else
- copy_settings_data->hpo_stream_enc_inst = 0;
- if (pipe_ctx->link_res.hpo_dp_link_enc)
- copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- else
- copy_settings_data->hpo_link_enc_inst = 0;
- }
-
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -225,6 +190,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
@@ -247,7 +224,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
- pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index ccbe385e132c..e6346c0ffc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst, struct dc_link *link);
+ uint8_t panel_inst);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
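
The same one-character fix appears in several DML files below; the reason it matters is that '&' on plain int flags combines bit patterns rather than truth values and does not short-circuit, so a flag holding any value other than 0 or 1 can flip the result. A standalone demonstration (in the driver the operands are effectively 0/1, so this is hardening and readability rather than a behavior change):

#include <stdio.h>

int main(void)
{
	int surf_linear = 0;
	int log2_dpte_req_height_ptes = 0;
	int surf_vert = 2; /* non-zero, i.e. logically true */

	int bitwise = !surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert;
	int logical = !surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert;

	printf("bitwise=%d logical=%d\n", bitwise, logical); /* 0 vs 1 */
	return 0;
}
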
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
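
clamp(v, lo, hi) is the kernel's named form of min(max(v, lo), hi), so the two watermark hunks above are behavior-preserving. A plain-int sketch of the equivalence:

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }
static int clamp_i(int v, int lo, int hi) { return min_i(max_i(v, lo), hi); }

int main(void)
{
	int vlevel_max = 3;
	int reqs[] = { 0, 2, 7 };

	for (unsigned i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		printf("req=%d -> old=%d new=%d\n", reqs[i],
		       min_i(max_i(reqs[i], 2), vlevel_max),
		       clamp_i(reqs[i], 2, vlevel_max));
	return 0;
}
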
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
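
This dcfclk/uclk table construction (repeated in the dcn303, dcn32 and dcn321 hunks below) is a two-pointer merge of two sorted lists; the while condition already guarantees i < num_dcfclk_sta_targets, so the extra test being removed was dead code. A reduced sketch of the merge with made-up clock values and a single output array:

#include <stdio.h>

int main(void)
{
	int dcfclk_sta_targets[] = { 300, 600, 900 };
	int optimal_dcfclk_for_uclk[] = { 500, 800, 1000 };
	int num_dcfclk = 3, num_uclk = 3;
	int merged[8], num_states = 0, i = 0, j = 0;

	while (i < num_dcfclk && j < num_uclk && num_states < 8) {
		if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j])
			merged[num_states++] = dcfclk_sta_targets[i++];
		else
			merged[num_states++] = optimal_dcfclk_for_uclk[j++];
	}

	for (int k = 0; k < num_states; k++)
		printf("%d ", merged[k]);   /* 300 500 600 800 900 */
	printf("\n");
	return 0;
}
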
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6160952245b4..18388fb00be8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -3229,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3401,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
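
The added (uint64_t) cast forces v_total * h_total to be computed in 64 bits before it feeds the round-up term; the expression as a whole is a ceiling division, refresh = ceil(pix_clk / (v_total * h_total)). A standalone rework with a plausible, made-up 4k120-class timing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_100hz = 11880000; /* 1188.0 MHz in 100 Hz units */
	uint32_t v_total = 2250;
	uint32_t h_total = 4400;

	uint64_t refresh = (uint64_t)pix_clk_100hz * 100 +
			   (uint64_t)v_total * h_total - 1;
	refresh /= v_total;   /* the driver uses div_u64() for these divisions */
	refresh /= h_total;

	printf("refresh = %llu Hz\n", (unsigned long long)refresh); /* 120 */
	return 0;
}
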
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index a06217a9eef6..f6879e622271 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -8,7 +8,7 @@
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
+#include "soc_and_ip_translator.h"
static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
const struct dc *in_dc,
@@ -38,375 +38,37 @@ static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-/*
- * Populate dml_init based on default static values in soc bb. The default
- * values are for reference and support at least minimal operation of current
- * SoC and DCN hardware. The values could be modifed by subsequent override
- * functions to reflect our true hardware capability.
- */
-static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- switch (in_dc->ctx->dce_version) {
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
- dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
- dml_init->soc_bb = dml2_socbb_dcn401;
- dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
- dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
- memset(dml_init, 0, sizeof(*dml_init));
+ project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
- return;
- }
-}
-
-static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-}
-
-/*
- * SMU stands for System Management Unit. It is a power management processor.
- * It owns the initialization of dc's clock table and programming of clock values
- * based on dc's requests.
- * Our clock values in base soc bb is a dummy placeholder. The real clock values
- * are retrieved from SMU firmware to dc clock table at runtime.
- * This function overrides our dummy placeholder values with real values in dc
- * clock table.
- */
-static void override_dml_init_with_values_from_smu(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- int i;
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (!in_dc->clk_mgr->funcs->is_smu_present ||
- !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
- /* skip if smu is not present */
- return;
-
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-}
-
-static void override_dml_init_with_values_from_vbios(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (dc_bw_params->num_channels) {
- dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (dc_bw_params->dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
- } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ break;
}
- dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+ return project_id;
}
-
-static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- /*
- * TODO - There seems to be overlaps between the values overriden from
- * dmub and vbios. Investigate and identify the values that DMUB needs
- * to own.
- */
-// const struct dmub_soc_bb_params *dmub_bb_params =
-// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
-
-// if (dmub_bb_params == NULL)
-// return;
-
-// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
-// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
-// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
-// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
-// if (dmub_bb_params->fclk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
-// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
-// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
-// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
-// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_min_idle_time_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
-// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
-// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #else
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #endif
-// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
-// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
-// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
-// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
-}
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
-static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (!config->use_native_soc_bb_construction) {
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
-}
-
-void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- populate_default_dml_init_params(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_smu(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -463,11 +125,7 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -1165,6 +823,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
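
The net effect of the dml21_translation_helper.c rewrite above is that the hand-rolled default/SMU/VBIOS/DMUB override chain is replaced by a per-ASIC ops table hung off the dc instance (the soc_and_ip_translator pointer added to struct dc in the dc.h hunk earlier); dml21 now simply asks it for a finished SoC bounding box and IP caps. A rough sketch of that dispatch shape with hypothetical, heavily reduced types:

#include <stdio.h>

struct soc_bb  { int dispclk_mhz; };
struct ip_caps { int num_dpp; };

struct translator_funcs {
	void (*get_soc_bb)(struct soc_bb *bb);
	void (*get_ip_caps)(struct ip_caps *caps);
};

/* One implementation per DCN revision; values here are invented. */
static void dcn_example_get_soc_bb(struct soc_bb *bb)  { bb->dispclk_mhz = 1950; }
static void dcn_example_get_ip_caps(struct ip_caps *c) { c->num_dpp = 4; }

static const struct translator_funcs dcn_example_translator = {
	.get_soc_bb  = dcn_example_get_soc_bb,
	.get_ip_caps = dcn_example_get_ip_caps,
};

int main(void)
{
	const struct translator_funcs *t = &dcn_example_translator; /* chosen per dce_version */
	struct soc_bb bb;
	struct ip_caps caps;

	t->get_soc_bb(&bb);
	t->get_ip_caps(&caps);
	printf("dispclk=%d MHz, dpp=%d\n", bb.dispclk_mhz, caps.num_dpp);
	return 0;
}
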
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 03de3cf06ae5..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -60,7 +60,7 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
DC_FP_START();
- dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
dml2_initialize_instance(&dml_ctx->v21.dml_init);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index b05030926ce8..91955bbe24b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -159,6 +159,8 @@ struct dml2_dchub_watermark_regs {
uint32_t sr_exit;
uint32_t sr_enter_z8;
uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 8c9f414aa6bf..176f55947664 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -96,6 +96,8 @@ struct dml2_soc_power_management_parameters {
double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 98c0234e2f47..7de10a95cfdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -417,6 +417,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b9cff2198511..bf62d42b3f78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize(
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
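+ /* clamp DISPCLK to at least half the pixel clock for HDMI TMDS 4:2:0 outputs */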
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
static double TruncToValidBPP(
@@ -4122,11 +4131,12 @@ static noinline_for_stack void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..640087e862f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 28687565ac22..ffb8c09f37a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,6 +201,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -877,6 +879,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -1016,6 +1021,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -1851,9 +1858,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1879,7 +1888,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index cd3fbc0591d8..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 5f1b49a50049..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index a56e75cdf712..c59f825cfae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -654,14 +654,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find DML pipe index (pipe_idx) using dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +669,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 75fb77bca83b..01480a04f85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -520,6 +520,15 @@ void dpp1_dppclk_control(
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Force disable cursor */
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c48139bed11f..f466182963f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 2d70586cef40..09be2a90cc79 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -1494,6 +1494,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_force_disable_cursor = dpp_force_disable_cursor,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 5a6a861402b3..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -673,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 1f53a9f0c0ac..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -1157,6 +1157,11 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
+
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
/* increase minimum slice count to meet sink throughput limitations */
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index d347bb06577a..e7e5f6d4778e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,6 +440,35 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0],
+ DET0_SIZE, &target_det_sizes[0]);
+
+ REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1],
+ DET1_SIZE, &target_det_sizes[1]);
+
+ REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2],
+ DET2_SIZE, &target_det_sizes[2]);
+
+ REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3],
+ DET3_SIZE, &target_det_sizes[3]);
+
+}
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t compbuf_config_error = 0;
+
+ REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR,
+ &compbuf_config_error);
+
+ return compbuf_config_error;
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -457,6 +486,8 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..49a469969d36 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,10 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_get_det_sizes(struct hubbub *hubbub,
+ uint32_t *curr_det_sizes,
+ uint32_t *target_det_sizes);
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b98505b240a7..cdb20251a154 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 32a6be543105..92957398ac0a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -1009,6 +1009,8 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6d41953011f5..a443722a8632 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -589,6 +589,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..a36273a52880 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,8 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index f8f991785d4f..cf2eb9793008 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,8 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +250,8 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +624,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -671,6 +675,7 @@ struct dcn_fl_regs_st {
uint32_t lut_done;
uint32_t lut_addr_mode;
uint32_t lut_width;
+ uint32_t lut_mpc_width;
uint32_t lut_tmz;
uint32_t lut_crossbar_sel_r;
uint32_t lut_crossbar_sel_g;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 62369be070ea..f325db555102 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -264,6 +264,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0da70b50e86d..556214b2227d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -505,6 +505,30 @@ void hubp3_init(struct hubp *hubp)
hubp_reset(hubp);
}
+uint32_t hubp3_get_current_read_line(struct hubp *hubp)
+{
+ uint32_t read_line = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(HUBPRET_READ_LINE_VALUE,
+ PIPE_READ_LINE,
+ &read_line);
+
+ return read_line;
+}
+
+unsigned int hubp3_get_underflow_status(struct hubp *hubp)
+{
+ uint32_t hubp_underflow = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_UNDERFLOW_STATUS,
+ &hubp_underflow);
+
+ return hubp_underflow;
+}
+
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -534,6 +558,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b7d7adf0b58c..842f4eb72cc8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -299,6 +300,11 @@ void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 7fd582a8a4ba..47101847c2b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -98,6 +110,9 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index f3a21c623f44..a5f23bb2a76a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -206,6 +206,9 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 6d060ba12da8..b140808f21af 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -218,6 +218,9 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 705b98b1b6cc..0fcbc6a35be6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = (cfg->width == 17) ? 0 : 1;
+ uint32_t width = cfg->width;
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ //cross bar
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -1033,6 +1070,10 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 608e6153fa68..fdabbeec8ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -252,7 +252,9 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4ea13d0bf815..153d68375fa3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -671,6 +671,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -1269,7 +1270,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1289,7 +1290,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1600,19 +1601,17 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
}
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output = {0};
+ build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output);
- build_audio_output(context, pipe_ctx, &audio_output);
-
- link_hwss->setup_audio_output(pipe_ctx, &audio_output,
+ link_hwss->setup_audio_output(pipe_ctx, &pipe_ctx->stream_res.audio_output,
pipe_ctx->stream_res.audio->inst);
pipe_ctx->stream_res.audio->funcs->az_configure(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.crtc_info,
&pipe_ctx->stream->audio_info,
- &audio_output.dp_link_info);
+ &pipe_ctx->stream_res.audio_output.dp_link_info);
if (dc->config.disable_hbr_audio_dp2)
if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio &&
@@ -2254,7 +2253,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
@@ -2386,9 +2385,7 @@ static void dce110_setup_audio_dto(
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
-
- build_audio_output(context, pipe_ctx, &audio_output);
+ build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output);
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
struct dtbclk_dto_params dto_params = {0};
@@ -2399,14 +2396,14 @@ static void dce110_setup_audio_dto(
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
} else
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
break;
}
}
@@ -2426,15 +2423,15 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output = {0};
-
- build_audio_output(context, pipe_ctx, &audio_output);
+ build_audio_output(context,
+ pipe_ctx,
+ &pipe_ctx->stream_res.audio_output);
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 7cd8c1576988..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -114,5 +114,12 @@ void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 39910f73ecd0..506c3bbbf221 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -328,19 +328,25 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
DTN_INFO("\n=======HUBP FL======\n");
- DTN_INFO(
- "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ uint32_t values[] = {
pool->hubps[i]->inst,
fl_regs->lut_enable,
fl_regs->lut_done,
fl_regs->lut_addr_mode,
fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
fl_regs->lut_tmz,
fl_regs->lut_crossbar_sel_r,
fl_regs->lut_crossbar_sel_g,
@@ -351,8 +357,13 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
fl_regs->lut_fl_bias,
fl_regs->lut_fl_scale,
fl_regs->lut_fl_mode,
- fl_regs->lut_fl_format);
- DTN_INFO("\n");
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = 18;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
}
}
@@ -541,19 +552,43 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
DTN_INFO("===== MPC RMCM 3DLUT =====\n");
- DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
- if (s.opp_id != 0xf)
- DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
- i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
- s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
- s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
- s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
- s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
- s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = 19;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
}
DTN_INFO("\n");
DTN_INFO("===== MPC RMCM Shaper =====\n");
@@ -3628,6 +3663,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3646,7 +3683,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3663,6 +3700,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3709,30 +3750,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * For no pipe_split, the viewport width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
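+ /* mirror x about the clip rect centre: x' = 2 * (clip_x + clip_width / 2) - x */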
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
@@ -3802,30 +3831,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * For no pipe_split, the viewport width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 3207addbd4eb..7d24fa1517bf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -3054,6 +3054,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 37a239219dfe..139a63101488 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -1228,3 +1228,51 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
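+/*
+ * Snapshot OTG scanout position, per-HUBP underflow/read-line/DET-error status,
+ * and HUBBUB DET sizes and compbuf config error into out_data for underflow debugging.
+ */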
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (tg) {
+ uint32_t v_blank_start = 0, v_blank_end = 0;
+
+ out_data->otg_inst = tg->inst;
+
+ tg->funcs->get_scanoutpos(tg,
+ &v_blank_start,
+ &v_blank_end,
+ &out_data->h_position,
+ &out_data->v_position);
+
+ out_data->otg_frame_count = tg->funcs->get_frame_count(tg);
+
+ out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg);
+ }
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+
+ if (hubp) {
+ if (hubp->funcs->hubp_get_underflow_status)
+ out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp);
+
+ if (hubp->funcs->hubp_in_blank)
+ out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp);
+
+ if (hubp->funcs->hubp_get_current_read_line)
+ out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp);
+
+ if (hubp->funcs->hubp_get_det_config_error)
+ out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp);
+ }
+ }
+
+ if (hubbub->funcs->get_det_sizes)
+ hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes);
+
+ if (hubbub->funcs->compbuf_config_error)
+ out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub);
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2ac5d54d1626..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 556f4fe57eda..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index e68f21fd5f0f..560984533950 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -528,3 +528,75 @@ void dcn314_disable_link_output(struct dc_link *link,
apply_symclk_on_tx_off_wa(link);
}
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true to power the DPP instance on (ungate), false to power it off (gate).
+ *
+ * Enable or disable power gate in the specific DPP instance.
+ * If power gating is disabled, the cursor is force-disabled instead when the instance is powered off.
+ */
+void dcn314_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+ /* Workaround for DCN314 with disabled power gating */
+ if (!power_on) {
+
+ /* Force disable cursor if power gating is disabled */
+ struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp && dpp->funcs->dpp_force_disable_cursor)
+ dpp->funcs->dpp_force_disable_cursor(dpp);
+ }
+ return;
+ }
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index 2305ad282f21..6c072d0274ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index f5112742edf9..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
@@ -141,6 +142,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_pg_control = dcn314_dpp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index b971356d30b1..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a3ccf805bd16..52cc488416ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -128,6 +128,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 58f2be2a326b..e34efcb7bde5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -123,6 +123,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index cc9f40d97af2..d5b5e2ce6ff6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -965,6 +965,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1619,20 +1621,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..d6e11b7e4fce 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -104,6 +104,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 9df8030e37f7..1723bbcf2c46 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -47,6 +47,7 @@ struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
+struct dc_underflow_debug_data;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -475,6 +476,9 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
void color_space_to_black_color(
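A note on the new hook above: get_underflow_debug_data is optional and only wired into the DCN3x/DCN4x sequencer tables touched in this series, so callers are expected to test the function pointer first. A minimal sketch of how a debug path might use it, assuming the usual dc->hwss access path and that the full definition of struct dc_underflow_debug_data is visible at the call site; the helper name and the zero-initialization are illustrative, not taken from the patch:

    static void collect_underflow_debug_data(struct dc *dc, struct timing_generator *tg)
    {
        struct dc_underflow_debug_data debug_data = {0};

        /* the hook is optional; skip silently on ASICs that do not provide it */
        if (dc->hwss.get_underflow_debug_data)
            dc->hwss.get_underflow_debug_data(dc, tg, &debug_data);

        /* debug_data now holds whatever the DCN-specific implementation captured */
    }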
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f0d7185153b2..d30f94c35f11 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -228,8 +228,7 @@ struct resource_funcs {
enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output);
+ struct pipe_ctx *pipes);
};
struct audio_support{
@@ -361,6 +360,8 @@ struct stream_resource {
uint8_t gsl_group;
struct test_pattern_params test_pattern_params;
+
+ struct audio_output audio_output;
};
struct plane_resource {
@@ -433,7 +434,7 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
};
struct pipe_ctx {
@@ -683,6 +684,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..9bee45b36629 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -229,6 +229,8 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes);
+ uint32_t (*compbuf_config_error)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0c5675d1c593..1b7c085dc2cc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -349,6 +349,9 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
+
+ void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index cee29e89ec5c..2b874d2cc61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -288,7 +304,10 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
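The new hubp_fl_3dlut_config struct above bundles what used to be several separate fast-load 3D LUT programming arguments into one call through hubp_program_3dlut_fl_config. A rough sketch of a caller, using only enumerators visible in this header; the helper name and the choice of which fields to fill in are assumptions:

    static void hubp_load_fl_3dlut(struct hubp *hubp, const struct dc_plane_address *lut_addr)
    {
        struct hubp_fl_3dlut_config cfg = {0};

        cfg.enabled = true;
        cfg.width = hubp_3dlut_fl_width_17;  /* 17x17x17 LUT */
        cfg.address = *lut_addr;
        /* mode, format, bias/scale and the crossbar bit slices are omitted here;
         * a real caller derives them from the plane's CM2 parameters. */

        if (hubp->funcs->hubp_program_3dlut_fl_config)
            hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cfg);
    }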
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 7641439f6ca0..22960ee03dee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -1059,21 +1069,6 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
- /**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
/**
* @mcm:
@@ -1098,6 +1093,7 @@ struct mpc_funcs {
* MPC RMCM new HW sequential programming functions
*/
struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 267ace4eef8a..f2de2cf23859 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -374,6 +374,7 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index f2503402c10e..0cce49d95e26 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -218,7 +218,10 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
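Combined with the factory added in soc_and_ip_translator.c later in this patch, the interface is meant to be consumed roughly as below. This is a sketch, not code from the series: the dce_version access path and the DC_FP_START()/DC_FP_END() bracketing (get_soc_bb ends up in dc_assert_fp_enabled-protected code) are assumptions about the caller:

    struct soc_and_ip_translator *xlat;
    struct dml2_soc_bb soc_bb;
    struct dml2_ip_capabilities ip_caps;

    xlat = dc_create_soc_and_ip_translator(dc->ctx->dce_version);
    if (xlat && xlat->translator_funcs) {
        DC_FP_START();
        xlat->translator_funcs->get_soc_bb(&soc_bb, dc, config);
        DC_FP_END();
        xlat->translator_funcs->get_ip_caps(&ip_caps);
    }
    dc_destroy_soc_and_ip_translator(&xlat);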
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2956c2b3ad1a..23f41c99fa38 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -75,7 +75,9 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
bool is_hpo_acquired;
uint8_t count;
int i;
- struct audio_output audio_output[MAX_PIPES];
+
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -99,7 +101,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
link_setting, count,
- *pipes, &audio_output[0]);
+ *pipes);
for (i = 0; i < count; i++) {
pipes[i]->clock_source->funcs->program_pix_clk(
pipes[i]->clock_source,
@@ -111,15 +113,16 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
const struct link_hwss *link_hwss = get_link_hwss(
link, &pipes[i]->link_res);
- link_hwss->setup_audio_output(pipes[i], &audio_output[i],
- pipes[i]->stream_res.audio->inst);
+ link_hwss->setup_audio_output(pipes[i],
+ &pipes[i]->stream_res.audio_output,
+ pipes[i]->stream_res.audio->inst);
pipes[i]->stream_res.audio->funcs->az_configure(
pipes[i]->stream_res.audio,
pipes[i]->stream->signal,
- &audio_output[i].crtc_info,
+ &pipes[i]->stream_res.audio_output.crtc_info,
&pipes[i]->stream->audio_info,
- &audio_output[i].dp_link_info);
+ &pipes[i]->stream_res.audio_output.dp_link_info);
if (link->dc->config.disable_hbr_audio_dp2 &&
pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
@@ -138,12 +141,19 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
// Set DPMS on with stream update
- for (i = 0; i < state->stream_count; i++)
- if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
- stream_update.stream = state->streams[i];
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
}
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 8c8682f743d6..cb80b4599936 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2458,7 +2458,6 @@ void link_set_dpms_on(
struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
@@ -2502,8 +2501,6 @@ void link_set_dpms_on(
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
}
- link_hwss->setup_stream_attribute(pipe_ctx);
-
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
// Enable VPG before building infoframe
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index de1143dbbd25..31a73867cd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index aecaf37eee35..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -408,8 +408,10 @@ enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const stru
link = stream->link;
if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
- || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- && link->hpd_status))
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if ((link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) && (link->hpd_status == false))
continue;
dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 651926e547b9..caddb7dfb133 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1525,8 +1525,8 @@ bool read_is_mst_supported(struct dc_link *link)
return false;
}
- rev.raw = 0;
- cap.raw = 0;
+ rev.raw = 0;
+ cap.raw = 0;
st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
sizeof(rev));
@@ -2125,13 +2125,13 @@ void detect_edp_sink_caps(struct dc_link *link)
&backlight_adj_cap, sizeof(backlight_adj_cap));
link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false;
core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
&general_edp_cap, sizeof(general_edp_cap));
link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true : false;
set_default_brightness_aux(link);
@@ -2506,3 +2506,40 @@ bool dp_is_sink_present(struct dc_link *link)
return present;
}
+
+uint8_t dp_get_lttpr_count(struct dc_link *link)
+{
+ if (dp_is_lttpr_present(link))
+ return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ return 0;
+}
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ bool lttpr_present = dp_is_lttpr_present(link);
+
+ if (auxless_support == NULL || auxwake_support == NULL)
+ return;
+
+ *auxless_support = false;
+ *auxwake_support = false;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return;
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) {
+ if (lttpr_present) {
+ if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED)
+ *auxless_support = true;
+ } else
+ *auxless_support = true;
+ }
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) {
+ if (!lttpr_present)
+ *auxwake_support = true;
+ }
+}
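dp_get_lttpr_count() and edp_get_alpm_support() are exported through the link service (see the link_factory.c hunk above), so higher layers reach them via the link_srv table rather than calling this file directly. Per the logic above, AUX-less ALPM is only reported when any LTTPR chain also advertises support for it, and AUX-wake ALPM only when no LTTPR is present. A small consumer sketch; the policy helpers are hypothetical:

    bool auxless_support = false, auxwake_support = false;
    uint8_t lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);

    link->dc->link_srv->edp_get_alpm_support(link, &auxless_support, &auxwake_support);

    if (auxless_support)
        choose_auxless_alpm(link);      /* hypothetical policy hook */
    else if (auxwake_support)
        choose_auxwake_alpm(link);      /* hypothetical policy hook */
    /* lttpr_count can additionally be forwarded to the DM or used for timing budgets */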
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 940b147cc5d4..7170db5a1c13 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -108,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+uint8_t dp_get_lttpr_count(struct dc_link *link);
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 819bf2d8ba53..8a3c18ae97a7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -48,8 +48,7 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->hpd_status
- && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
@@ -226,35 +225,40 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->hpd_status) {
- val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
+ if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index);
+ return false;
+ }
- if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- retrieve_usb4_dp_bw_allocation_info(link);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
- if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
+ retrieve_usb4_dp_bw_allocation_info(link);
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- ret = true;
-
- if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
- /*
- * During DP tunnel creation, the CM preallocates BW
- * and reduces the estimated BW of other DPIAs.
- * The CM releases the preallocation only when the allocation is complete.
- * Perform a zero allocation to make the CM release the preallocation
- * and correctly update the estimated BW for all DPIAs per host router.
- */
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
- } else
- DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
- }
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
+
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
return ret;
}
@@ -297,15 +301,12 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
{
if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpia_bw_alloc_config.bw_alloc_enabled) {
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
+ if (peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.link_max_bw = peak_bw;
link_dpia_send_bw_alloc_request(link, peak_bw);
- }
- //2. Cold Unplug
- else if (!link->hpd_status)
+ } else
dpia_bw_alloc_unplug(link);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 2dc1a660e504..134093ce5a8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1018,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
{
enum dc_status status;
uint8_t sink_status = 0;
- uint8_t i;
+ uint32_t i;
+ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? lttpr_count * 300 : 10);
+
+ // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL)
+ // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case
/* clear training pattern set */
status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
@@ -1028,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < intra_hop_disable_time_ms; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
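To make the new bound concrete: dp_parse_lttpr_repeater_count() turns the raw PHY_REPEATER_CNT value into a plain count, so with two LTTPRs in the path the poll budget becomes 2 * 300 = 600 iterations instead of the old fixed 10, while a path with no LTTPRs keeps the 10-iteration budget. Given the _ms naming and the existing per-iteration delay in the unmodified tail of this loop, that corresponds to roughly 600 ms versus 10 ms of worst-case waiting for DP_INTRA_HOP_AUX_REPLY_INDICATION to clear.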
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e7927b8f5ba3..8b7b87b21c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -870,6 +870,8 @@ bool edp_setup_psr(struct dc_link *link,
psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+ psr_context->os_request_force_ffu = psr_config->os_request_force_ffu;
+
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
@@ -944,7 +946,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
link->replay_settings.replay_allow_active = *allow_active;
}
@@ -1029,6 +1031,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
+ replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu;
+
link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -1042,7 +1046,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
memset(&alpm_config, 0, sizeof(alpm_config));
- alpm_config.bits.ENABLE = 1;
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 0;
+ }
+
dm_helpers_dp_write_dpcd(
link->ctx,
link,
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..2a422e223bf2 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
@@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
- memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height);
memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index f3fb3fe13757..e1a0308dee57 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -287,13 +287,6 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I
}
}
-void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
-}
-
void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
@@ -611,7 +604,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .program_3dlut_size = mpc401_program_3dlut_size,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index eb0c68d0b0c7..fdc42f8ab3ff 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control(
bool lut_bank_a,
int mpcc_id);
-void mpc401_program_3dlut_size(
- struct mpc *mpc,
- bool is_17x17x17,
- int mpcc_id);
-
void mpc401_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index d159e3ed3bb3..ead92ad78a23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -62,6 +62,7 @@
SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 72bff94cb57d..52d5ea98c86b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+
optc1_clear_optc_underflow(optc);
return true;
@@ -428,6 +430,21 @@ static void optc35_set_long_vtotal(
}
}
+static void optc35_wait_otg_disable(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ uint32_t is_master_en;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en);
+ if (!is_master_en)
+ REG_WAIT(OTG_CLOCK_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+}
+
static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -479,6 +496,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc35_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index ff79c38287df..5af13706e601 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -226,6 +226,11 @@ bool optc401_disable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
+ // wait until OTG_CURRENT_MASTER_EN_STATE == 0
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 15000);
+
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 164ba796f64c..869a8e515fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -1111,12 +1111,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
+ (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1152,12 +1152,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index eb1e158d3436..2f23cc6df571 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -990,12 +990,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
memory_type_multiplier = MEMORY_TYPE_HBM;
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 895349d9ca07..201ed863b69e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2192,7 +2192,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3ed7f50554e2..ca17e5d8fdc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -2239,8 +2239,7 @@ struct resource_pool *dcn31_create_resource_pool(
enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output)
+ struct pipe_ctx *pipes)
{
struct dc_state *state = link->dc->current_state;
int i;
@@ -2255,7 +2254,7 @@ enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
// Setup audio
if (pipes[i].stream_res.audio != NULL)
- build_audio_output(state, &pipes[i], &audio_output[i]);
+ build_audio_output(state, &pipes[i], &pipes[i].stream_res.audio_output);
}
#else
/* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index c32c85ef0ba4..7e8fde65528f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -69,8 +69,7 @@ unsigned int dcn31_get_det_buffer_size(
enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output);
+ struct pipe_ctx *pipes);
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 82f966cf4ed2..20d714596021 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1141,7 +1141,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
- SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index b3988e38d0a6..068c123ea8a8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -708,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.use_max_lb = true,
.force_disable_subvp = false,
+ .disable_force_pstate_allow_on_hw_release = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = true,
.using_dml21 = true,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 2ae6831c31ef..0fc66487d800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
- SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
+ SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
new file mode 100644
index 000000000000..bc93356a0b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+# Makefile for the soc_and_ip_translator (bounding box) component.
+# Floating point is required due to the nature of bounding box values
+
+soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
+soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
+
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+
+soc_and_ip_translator := soc_and_ip_translator.o
+soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+
+AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator)
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
new file mode 100644
index 000000000000..3190c76eb482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn4_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to get up-to-date bounding box values.
+ * Bounding box values are stored in several locations, and those locations can vary with DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
+ */
+
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb)
+{
+ memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+}
+
+/*
+ * DC clock table is obtained from SMU during runtime.
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * The clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into dc's clock table at runtime.
+ * This function overrides the dummy placeholder values with the real values
+ * from dc's clock table.
+ */
+static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ const struct clk_bw_params *dc_bw_params,
+ bool use_clock_dc_limits)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table,
+ dc->clk_mgr->bw_params,
+ config->use_clock_dc_limits);
+ }
+}
+
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+
+ /* latencies in vbios are platform specific and should be used if provided */
+ if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0;
+}
+
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ /* set if the value is provided */
+ if (dc->bb_overrides.sr_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.fclk_clock_change_latency_ns)
+ soc_bb->power_management_parameters.fclk_change_blackout_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+
+ //Z8 values are neither expected nor used on DCN401 but are still added for completeness
+ if (dc->bb_overrides.sr_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* An individual value set by one of these functions can be overwritten by a later one.
+ * The updates are applied in order of priority (lowest to highest).
+ */
+ dc_assert_fp_enabled();
+
+ dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
+static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn401_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn401_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn401_get_ip_caps,
+};
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs;
+}
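The per-clock blocks in dcn401_convert_dc_clock_table_to_soc_bb_clock_table() all apply the same dc-mode clamp, which is easiest to see with made-up numbers: if the SMU reports dcfclk entries of 400, 600, 800 and 1000 MHz and dc_mode_limit.dcfclk_mhz is 700 with use_clock_dc_limits set, the first two entries pass through as 400000 and 600000 kHz, the third (800 above the limit, previous 600 below it) is replaced by the 700000 kHz limit and num_clk_values drops to 3, and the remaining slot is zeroed because the loop index is then past the shortened table. If the limit instead matches an existing entry, the table is simply truncated after that entry rather than extended with a duplicate.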
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
new file mode 100644
index 000000000000..21d842857601
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN401_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "soc_and_ip_translator.h"
+#include "dml2/dml21/inc/dml_top_soc_parameter_types.h"
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+/* Functions that can be re-used by higher DCN revisions of this component */
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+
+#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
new file mode 100644
index 000000000000..c9e224d262c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn42_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn42_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to get up-to-date bounding box values.
+ * Bounding box values are stored in several locations, and those locations can vary with the DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
+ */
+
+static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn42_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn42_get_ip_caps,
+};
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
new file mode 100644
index 000000000000..914dcbb369a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN42_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "dml_top_soc_parameter_types.h"
+#include "soc_and_ip_translator.h"
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
new file mode 100644
index 000000000000..0fc0e5a6c171
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+
+static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
+ enum dce_version dc_version)
+{
+ switch (dc_version) {
+ case DCN_VERSION_4_01:
+ dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
+ default:
+ break;
+ }
+}
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version)
+{
+ struct soc_and_ip_translator *soc_and_ip_translator;
+
+ soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL);
+ if (!soc_and_ip_translator)
+ return NULL;
+
+ dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version);
+
+ return soc_and_ip_translator;
+}
+
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator)
+{
+ kfree(*soc_and_ip_translator);
+ *soc_and_ip_translator = NULL;
+}
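
Illustrative caller sketch (not part of this change): how the translator interface above fits together, assuming the function-pointer signatures match the dcn401 declarations and with error handling kept minimal.

static void example_query_soc_and_ip(const struct dc *dc,
				     const struct dml2_configuration_options *config,
				     struct dml2_soc_bb *soc_bb,
				     struct dml2_ip_capabilities *ip_caps)
{
	/* Hypothetical caller; DCN_VERSION_4_01 is the only revision wired up in this hunk */
	struct soc_and_ip_translator *xlat =
		dc_create_soc_and_ip_translator(DCN_VERSION_4_01);

	if (!xlat)
		return;

	/* Defaults first, then clk_mgr/vbios/software-policy overrides (see dcn401 above) */
	xlat->translator_funcs->get_soc_bb(soc_bb, dc, config);
	xlat->translator_funcs->get_ip_caps(ip_caps);

	dc_destroy_soc_and_ip_translator(&xlat);
}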
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index ad088d70e189..6ffc74fc9dcd 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -44,6 +44,11 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link) {}
+static void virtual_stream_encoder_lvds_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing)
+{}
+
static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
@@ -115,6 +120,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
virtual_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
virtual_stream_encoder_dvi_set_stream_attribute,
+ .lvds_set_stream_attribute =
+ virtual_stream_encoder_lvds_set_stream_attribute,
.set_throttled_vcp_size =
virtual_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 0bafb6710761..338fdc651f2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -316,6 +316,7 @@ struct dmub_srv_hw_params {
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
+ bool override_hbr3_pll_vco;
};
/**
@@ -567,6 +568,7 @@ struct dmub_srv {
bool sw_init;
bool hw_init;
+ bool dpia_supported;
uint64_t fb_base;
uint64_t fb_offset;
@@ -597,6 +599,8 @@ struct dmub_notification {
enum dmub_notification_type type;
uint8_t link_index;
uint8_t result;
+ /* notify instance from DMUB */
+ uint8_t instance;
bool pending_notification;
union {
struct aux_reply_data aux_reply;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c587b3441e07..02a4a20e3560 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -843,7 +843,8 @@ union dmub_fw_boot_options {
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
- uint32_t reserved : 6; /**< reserved */
+ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
+ uint32_t reserved : 5; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -882,7 +883,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_fw_signals {
+union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -897,7 +898,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_driver_signals {
+union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
@@ -1990,18 +1991,19 @@ struct dmub_cmd_lsdma_data {
struct lsdma_tiled_copy_data {
uint32_t src_addr_lo;
uint32_t src_addr_hi;
+
uint32_t dst_addr_lo;
uint32_t dst_addr_hi;
uint32_t src_x : 16;
uint32_t src_y : 16;
- uint32_t src_width : 16;
- uint32_t src_height : 16;
-
uint32_t dst_x : 16;
uint32_t dst_y : 16;
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
uint32_t dst_width : 16;
uint32_t dst_height : 16;
@@ -2034,23 +2036,58 @@ struct dmub_cmd_lsdma_data {
uint32_t padding : 30;
} tiled_copy_data;
struct lsdma_linear_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
uint32_t count : 30;
uint32_t cache_policy_dst : 2;
uint32_t tmz : 1;
uint32_t cache_policy_src : 2;
uint32_t padding : 29;
-
+ } linear_copy_data;
+ struct lsdma_linear_sub_window_copy_data {
uint32_t src_lo;
uint32_t src_hi;
+
uint32_t dst_lo;
uint32_t dst_hi;
- } linear_copy_data;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t reserved0 : 22;
+ } linear_sub_window_copy_data;
struct lsdma_reg_write_data {
uint32_t reg_addr;
uint32_t reg_data;
} reg_write_data;
struct lsdma_pio_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2063,12 +2100,11 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t src_lo;
- uint32_t src_hi;
- uint32_t dst_lo;
- uint32_t dst_hi;
} pio_copy_data;
struct lsdma_pio_constfill_data {
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2081,14 +2117,12 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t dst_lo;
- uint32_t dst_hi;
+
uint32_t data;
} pio_constfill_data;
uint32_t all[14];
} u;
-
};
struct dmub_rb_cmd_lsdma {
@@ -2330,6 +2364,7 @@ struct dmub_cmd_fams2_global_config {
union dmub_fams2_global_feature_config features;
uint32_t recovery_timeout_us;
uint32_t hwfq_flip_programming_delay_us;
+ uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target
};
union dmub_cmd_fams2_config {
@@ -3986,6 +4021,10 @@ enum dmub_cmd_replay_type {
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
/**
+ * Set version
+ */
+ DMUB_CMD__REPLAY_SET_VERSION = 9,
+ /**
* Set Replay General command.
*/
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
@@ -4015,6 +4054,10 @@ struct dmub_alpm_auxless_data {
uint16_t lfps_t1_t2_override_us;
short lfps_t1_t2_offset_us;
uint8_t lttpr_count;
+ /*
+ * Padding to align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
};
/**
@@ -4048,14 +4091,6 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
- * @hpo_stream_enc_inst: HPO stream encoder instance
- */
- uint8_t hpo_stream_enc_inst;
- /**
- * @hpo_link_enc_inst: HPO link encoder instance
- */
- uint8_t hpo_link_enc_inst;
- /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -4099,13 +4134,77 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
-
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
/**
* @pad: Align structure to 4 byte boundary.
*/
uint8_t pad[2];
};
+
+/**
+ * Replay versions.
+ */
+enum replay_version {
+ /**
+ * FreeSync Replay
+ */
+ REPLAY_VERSION_FREESYNC_REPLAY = 0,
+ /**
+ * Panel Replay
+ */
+ REPLAY_VERSION_PANEL_REPLAY = 1,
+ /**
+ * Replay not supported.
+ */
+ REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_cmd_replay_set_version_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently only panel instance 0 or 1 is supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay version that FW should implement.
+ */
+ enum replay_version version;
+ /**
+ * Replay control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_rb_cmd_replay_set_version {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_cmd_replay_set_version_data replay_set_version_data;
+};
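
Illustrative sketch (not part of this change): filling the new command payload. Only the fields defined above are shown; the dmub_cmd_header setup and ring submission follow the existing replay command paths, which are outside this hunk, and the values here are assumptions.

	struct dmub_rb_cmd_replay_set_version cmd = {0};

	/* Illustrative values: request Panel Replay on panel 0 */
	cmd.replay_set_version_data.panel_inst = 0;
	cmd.replay_set_version_data.version = REPLAY_VERSION_PANEL_REPLAY;
	cmd.replay_set_version_data.cmd_version = 0;	/* assumed control version */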
+
/**
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -4470,6 +4569,10 @@ union dmub_replay_cmd_set {
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
/**
+ * Definition of DMUB_CMD__REPLAY_SET_VERSION command data.
+ */
+ struct dmub_cmd_replay_set_version_data version_data;
+ /**
* Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
*/
struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
@@ -4685,21 +4788,25 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
+ * LSDMA copies data from source to destination linearly in sub window
+ */
+ DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
+ /**
* Send the tiled-to-tiled copy command
*/
- DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3,
/**
* Send the poll reg write command
*/
- DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 4,
/**
* Send the pio copy command
*/
- DMUB_CMD__LSDMA_PIO_COPY = 4,
+ DMUB_CMD__LSDMA_PIO_COPY = 5,
/**
* Send the pio constfill command
*/
- DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 6,
};
struct abm_ace_curve {
@@ -5914,6 +6021,9 @@ enum ips_residency_mode {
IPS_RESIDENCY__IPS2,
IPS_RESIDENCY__IPS1_RCG,
IPS_RESIDENCY__IPS1_ONO2_ON,
+ IPS_RESIDENCY__IPS1_Z8_RETENTION,
+ IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS,
+ IPS_RESIDENCY__PG_ONO_CURRENT_STATE
};
#define NUM_IPS_HISTOGRAM_BUCKETS 16
@@ -5927,6 +6037,8 @@ struct dmub_ips_residency_info {
uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
uint64_t total_time_us;
uint64_t total_inactive_time_us;
+ uint32_t ono_pg_state_at_collection;
+ uint32_t ono_pg_state_last_seen_in_ips;
};
/**
@@ -6223,6 +6335,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
*/
struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_rb_cmd_replay_set_version replay_set_version;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -6431,15 +6547,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
- const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (const uint8_t *)cmd;
uint8_t i;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
// copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ for (i = 0; i < DMUB_RB_CMD_SIZE; i++)
*dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -6464,6 +6583,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
const uint8_t *src = (const uint8_t *)cmd;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
@@ -6509,6 +6631,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
uint32_t num_cmds,
uint32_t *next_rptr)
{
+ if (rb->capacity == 0)
+ return;
+
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
if (*next_rptr >= rb->capacity)
@@ -6572,6 +6697,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb,
*/
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_empty(rb))
return false;
@@ -6596,6 +6724,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
+ if (rb->capacity == 0)
+ return;
+
while (rptr != wptr) {
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 3f38db752b84..4777c7203b2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -377,6 +377,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 2228d62adc7e..834e5434ccb8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -400,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 567c5b1aeb7a..e7a58b140388 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
switch (cmd.cmd_common.header.type) {
case DMUB_OUT_CMD__DP_AUX_REPLY:
notify->type = DMUB_NOTIFICATION_AUX_REPLY;
- notify->link_index = cmd.dp_aux_reply.control.instance;
+ notify->instance = cmd.dp_aux_reply.control.instance;
notify->result = cmd.dp_aux_reply.control.result;
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
@@ -84,17 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->type = DMUB_NOTIFICATION_HPD_IRQ;
}
- notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->instance = cmd.dp_hpd_notify.hpd_data.instance;
notify->result = AUX_RET_SUCCESS;
break;
case DMUB_OUT_CMD__SET_CONFIG_REPLY:
notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
- notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->instance = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
- notify->link_index = cmd.dpia_notification.payload.header.instance;
+ notify->instance = cmd.dpia_notification.payload.header.instance;
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5fc29164e4b4..8aea50aa9533 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -213,6 +213,11 @@ enum {
#endif
#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define DEVICE_ID_NV_143F 0x143F
+#define DEVICE_ID_NV_13F9 0x13F9
+#define DEVICE_ID_NV_13FA 0x13FA
+#define DEVICE_ID_NV_13FB 0x13FB
+#define DEVICE_ID_NV_13FC 0x13FC
+#define DEVICE_ID_NV_13DB 0x13DB
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define DEVICE_ID_VGH_1435 0x1435
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e58e7b93810b..6b7db8ec9a53 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 5c86423c2e92..3d083010e734 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -211,7 +211,7 @@ atom_bios_string = "ATOM"
};
*/
-#pragma pack(1) /* BIOS data must use byte aligment*/
+#pragma pack(1) /* BIOS data must use byte alignment*/
enum atombios_image_offset{
OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
@@ -255,8 +255,8 @@ struct atom_rom_header_v2_2
uint16_t subsystem_vendor_id;
uint16_t subsystem_id;
uint16_t pci_info_offset;
- uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position
- uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position
+ uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position
+ uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position
uint16_t reserved;
uint32_t pspdirtableoffset;
};
@@ -453,7 +453,7 @@ struct atom_dtd_format
uint8_t refreshrate;
};
-/* atom_dtd_format.modemiscinfo defintion */
+/* atom_dtd_format.modemiscinfo definition */
enum atom_dtd_format_modemiscinfo{
ATOM_HSYNC_POLARITY = 0x0002,
ATOM_VSYNC_POLARITY = 0x0004,
@@ -678,7 +678,7 @@ struct lcd_info_v2_1
uint32_t reserved1[8];
};
-/* lcd_info_v2_1.panel_misc defintion */
+/* lcd_info_v2_1.panel_misc definition */
enum atom_lcd_info_panel_misc{
ATOM_PANEL_MISC_FPDI =0x0002,
};
@@ -716,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id {
/* gpio_id pre-define id for multiple usage */
/* GPIO use to control PCIE_VDDC in certain SLT board */
PCIE_VDDC_CONTROL_GPIO_PINID = 56,
- /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */
+ /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enable */
PP_AC_DC_SWITCH_GPIO_PINID = 60,
/* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */
VDDC_VRHOT_GPIO_PINID = 61,
@@ -734,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id {
struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
- /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+ /*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -997,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def {
enum atom_display_device_tag_def{
ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display
- ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability
+ ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility
ATOM_DISPLAY_DFP1_SUPPORT = 0x0008,
ATOM_DISPLAY_DFP2_SUPPORT = 0x0080,
ATOM_DISPLAY_DFP3_SUPPORT = 0x0200,
@@ -1011,7 +1011,7 @@ struct atom_display_object_path_v2
{
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view;
uint16_t encoder_recordoffset;
uint16_t extencoder_recordoffset;
@@ -1023,7 +1023,7 @@ struct atom_display_object_path_v2
struct atom_display_object_path_v3 {
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t reserved1; //only on USBC case, otherwise always = 0
uint16_t reserved2; //reserved and always = 0
uint16_t reserved3; //reserved and always = 0
@@ -3547,7 +3547,7 @@ struct atom_voltage_object_header_v4{
enum atom_voltage_object_mode
{
VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4
- VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4
+ VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4
VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4
VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4
VOLTAGE_OBJ_EVV = 8,
@@ -3585,7 +3585,7 @@ struct atom_gpio_voltage_object_v4
{
struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode
- uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table
+ uint8_t gpio_entry_num; // indicate the entry numbers of Voltage/Gpio value Look up table
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
@@ -4507,8 +4507,8 @@ struct amd_acpi_description_header{
struct uefi_acpi_vfct{
struct amd_acpi_description_header sheader;
uint8_t tableUUID[16]; //0x24
- uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
- uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
+ uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
uint32_t reserved[4]; //0x3C
};
@@ -4540,7 +4540,7 @@ struct gop_lib1_content {
/*
***************************************************************************
Scratch Register definitions
- Each number below indicates which scratch regiser request, Active and
+ Each number below indicates which scratch register request, Active and
Connect all share the same definitions as display_device_tag defines
***************************************************************************
*/
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2b1ea7467b0..2f7e4b5bebf3 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v14_0_ip_block;
+enum smu_temp_metric_type {
+ SMU_TEMP_METRIC_BASEBOARD,
+ SMU_TEMP_METRIC_GPUBOARD,
+ SMU_TEMP_METRIC_MAX,
+};
+
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
};
@@ -496,6 +502,8 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
+ bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type);
ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
@@ -1595,6 +1603,79 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+enum amdgpu_vr_temp {
+ AMDGPU_VDDCR_VDD0_TEMP,
+ AMDGPU_VDDCR_VDD1_TEMP,
+ AMDGPU_VDDCR_VDD2_TEMP,
+ AMDGPU_VDDCR_VDD3_TEMP,
+ AMDGPU_VDDCR_SOC_A_TEMP,
+ AMDGPU_VDDCR_SOC_C_TEMP,
+ AMDGPU_VDDCR_SOCIO_A_TEMP,
+ AMDGPU_VDDCR_SOCIO_C_TEMP,
+ AMDGPU_VDD_085_HBM_TEMP,
+ AMDGPU_VDDCR_11_HBM_B_TEMP,
+ AMDGPU_VDDCR_11_HBM_D_TEMP,
+ AMDGPU_VDD_USR_TEMP,
+ AMDGPU_VDDIO_11_E32_TEMP,
+ AMDGPU_VR_MAX_TEMP_ENTRIES,
+};
+
+enum amdgpu_system_temp {
+ AMDGPU_UBB_FPGA_TEMP,
+ AMDGPU_UBB_FRONT_TEMP,
+ AMDGPU_UBB_BACK_TEMP,
+ AMDGPU_UBB_OAM7_TEMP,
+ AMDGPU_UBB_IBC_TEMP,
+ AMDGPU_UBB_UFPGA_TEMP,
+ AMDGPU_UBB_OAM1_TEMP,
+ AMDGPU_OAM_0_1_HSC_TEMP,
+ AMDGPU_OAM_2_3_HSC_TEMP,
+ AMDGPU_OAM_4_5_HSC_TEMP,
+ AMDGPU_OAM_6_7_HSC_TEMP,
+ AMDGPU_UBB_FPGA_0V72_VR_TEMP,
+ AMDGPU_UBB_FPGA_3V3_VR_TEMP,
+ AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP,
+ AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP,
+ AMDGPU_RETIMER_0_1_0V9_VR_TEMP,
+ AMDGPU_RETIMER_4_5_0V9_VR_TEMP,
+ AMDGPU_RETIMER_2_3_0V9_VR_TEMP,
+ AMDGPU_RETIMER_6_7_0V9_VR_TEMP,
+ AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP,
+ AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP,
+ AMDGPU_IBC_HSC_TEMP,
+ AMDGPU_IBC_TEMP,
+ AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32,
+};
+
+enum amdgpu_node_temp {
+ AMDGPU_RETIMER_X_TEMP,
+ AMDGPU_OAM_X_IBC_TEMP,
+ AMDGPU_OAM_X_IBC_2_TEMP,
+ AMDGPU_OAM_X_VDD18_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_B_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_D_VR_TEMP,
+ AMDGPU_NODE_MAX_TEMP_ENTRIES = 12,
+};
+
+struct amdgpu_gpuboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius; bits 24:31 hold the sensor id, bits 0:23 the temp value */
+ uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES];
+ uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius; bits 24:31 hold the sensor id, bits 0:23 the temp value */
+ uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES];
+};
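
Editorial sketch of how the encoding noted above could be unpacked; the helper names are illustrative and not part of this change.

static inline uint8_t example_temp_entry_sensor_id(uint32_t entry)
{
	return (entry >> 24) & 0xff;	/* bits 24:31 carry the sensor id */
}

static inline uint32_t example_temp_entry_value(uint32_t entry)
{
	return entry & 0xffffff;	/* bits 0:23 carry the temperature in Celsius */
}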
+
struct amdgpu_partition_metrics_v1_0 {
struct metrics_table_header common_header;
/* Current clocks (Mhz) */
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index d85ffab2aff9..c04bd351b250 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_SET_SE_MODE = 17,
MES_SCH_API_SET_GANG_SUBMIT = 18,
MES_SCH_API_SET_HW_RSRC_1 = 19,
+ MES_SCH_API_INV_TLBS = 20,
MES_SCH_API_MAX = 0xFF
};
@@ -870,6 +871,35 @@ union MESAPI__SET_GANG_SUBMIT {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+/*
+ * @inv_sel 0 - select pasid as input to do the invalidation, 1 - select vmid
+ * @flush_type 0 - old style, 1 - lightweight, 2 - heavyweight, 3 - heavyweight2
+ * @inv_sel_id specific pasid when inv_sel is 0, or specific vmid when inv_sel is 1
+ * @hub_id 0 - gc_hub, 1 - mm_hub
+ */
+struct INV_TLBS {
+ uint8_t inv_sel;
+ uint8_t flush_type;
+ uint16_t inv_sel_id;
+ uint32_t hub_id;
+ /* If the following two inv_range settings are both 0, the whole VM will be invalidated;
+ * otherwise only the required range is invalidated.
+ */
+ uint64_t inv_range_va_start;
+ uint64_t inv_range_size;
+ uint64_t reserved;
+};
+
+union MESAPI__INV_TLBS {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ struct INV_TLBS invalidate_tlbs;
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
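
Editorial sketch (not part of this change): a whole-VM invalidation keyed by PASID, following the field description above. MES_API_HEADER setup and ring submission are omitted since they are outside this hunk, and the chosen values are illustrative.

static void example_fill_inv_tlbs(union MESAPI__INV_TLBS *api, uint16_t pasid)
{
	memset(api, 0, sizeof(*api));

	api->invalidate_tlbs.inv_sel = 0;	/* 0 - invalidate by pasid */
	api->invalidate_tlbs.flush_type = 2;	/* heavyweight flush */
	api->invalidate_tlbs.inv_sel_id = pasid;
	api->invalidate_tlbs.hub_id = 0;	/* gc_hub */
	/* inv_range_va_start and inv_range_size stay 0: invalidate the whole VM */
}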
+
#pragma pack(pop)
#endif
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 71d986dd7a6e..518d07afc7df 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -764,10 +764,6 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
- if (adev->cper.enabled)
- if (amdgpu_cper_generate_bp_threshold_record(adev))
- dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
-
return ret;
}
@@ -824,6 +820,21 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -2038,6 +2049,66 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
}
/**
+ * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature
+ * type
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific temperature type. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (!pp_funcs->get_temp_metrics ||
+ !amdgpu_dpm_is_temp_metrics_supported(adev, type))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+/**
+ * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
+ * metrics type is supported
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ *
+ * This function reports whether the specified temperature metrics type is supported.
+ *
+ * Return: True if the metrics type is supported, false otherwise.
+ */
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ bool support_temp_metrics = false;
+
+ if (!pp_funcs->temp_metrics_is_supported)
+ return support_temp_metrics;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_temp_metrics =
+ pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_temp_metrics;
+}
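
For reference, a hedged sketch of the intended calling pattern for the two helpers above: check support, query the size with a NULL table, then fetch into a buffer of that size. The function name and buffer handling are illustrative, not part of this change.

static ssize_t example_read_gpuboard_temps(struct amdgpu_device *adev, void **out)
{
	ssize_t size;
	void *buf;

	if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
		return -EOPNOTSUPP;

	/* First call with a NULL table returns the metrics structure size */
	size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
	if (size <= 0)
		return size;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Second call fills the buffer with the current metrics */
	size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
	if (size < 0) {
		kfree(buf);
		return size;
	}

	*out = buf;
	return size;
}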
+
+/**
* amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
* partition
* @adev: Pointer to the device.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 4b64851fdb42..5230276628a3 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2073,6 +2073,134 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
return 0;
}
+/**
+ * DOC: board
+ *
+ * Certain SOCs support reporting of various board attributes. This is useful
+ * for user applications to monitor various board related attributes.
+ *
+ * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
+ * only two types of attributes are reported: baseboard temperature and
+ * GPU board temperature. Both are reported as binary files.
+ *
+ * .. code-block:: console
+ *
+ * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
+ *
+ */
+
+/**
+ * DOC: baseboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current baseboard
+ * temperature metrics data. The file baseboard_temp is used for this.
+ * Reading the file will dump all the current baseboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: gpuboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpuboard
+ * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
+static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
+
+static struct attribute *board_attrs[] = {
+ &dev_attr_baseboard_temp.attr,
+ &dev_attr_gpuboard_temp.attr,
+ NULL
+};
+
+static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_baseboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
+ return 0;
+ }
+
+ if (attr == &dev_attr_gpuboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+ .is_visible = amdgpu_board_attr_visible,
+};
+
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
struct device_attribute dev_attr;
@@ -3458,14 +3586,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
- if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
- return 0;
+ if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
+ if (adev->family == AMDGPU_FAMILY_SI ||
+ ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
+ (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
+ return 0;
+ }
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
@@ -4461,6 +4591,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
goto err_out0;
}
+ if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_board_attr_group);
+ if (ret)
+ goto err_out0;
+ }
+
adev->pm.sysfs_initialized = true;
return 0;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 768317ee1486..9748744133d9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -526,6 +526,8 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
void *table);
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -613,5 +615,8 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b47cb4a5f488..c5965924e7c6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -766,6 +766,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 14):
case IP_VERSION(13, 0, 12):
smu_v13_0_6_set_ppt_funcs(smu);
+ smu_v13_0_6_set_temp_funcs(smu);
/* Enable pp_od_clk_voltage node */
smu->od_enabled = true;
break;
@@ -3831,6 +3832,51 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
+{
+ struct smu_context *smu = handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ enum smu_table_id table_id;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
+ return -EOPNOTSUPP;
+
+ table_id = smu_metrics_get_temp_table_id(type);
+
+ if (table_id == SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ /* If the request is to get size alone, return the cached table size */
+ if (!table && tables[table_id].cache.size)
+ return tables[table_id].cache.size;
+
+ if (smu_table_cache_is_valid(&tables[table_id])) {
+ memcpy(table, tables[table_id].cache.buffer,
+ tables[table_id].cache.size);
+ return tables[table_id].cache.size;
+ }
+
+ return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
+}
+
+static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
+{
+ struct smu_context *smu = handle;
+ bool ret = false;
+
+ if (!smu->pm_enabled)
+ return false;
+
+ if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
+ ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);
+
+ return ret;
+}
+
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
struct smu_context *smu = handle;
@@ -3903,6 +3949,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
.get_xcp_metrics = smu_sys_get_xcp_metrics,
+ .get_temp_metrics = smu_sys_get_temp_metrics,
+ .temp_metrics_is_supported = smu_temp_metrics_is_supported,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -4076,6 +4124,16 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+bool smu_reset_vcn_is_supported(struct smu_context *smu)
+{
+ bool ret = false;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_vcn_is_supported)
+ ret = smu->ppt_funcs->reset_vcn_is_supported(smu);
+
+ return ret;
+}
+
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b52e194397e2..5dd49eca598d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -249,6 +249,14 @@ struct smu_user_dpm_profile {
tables[table_id].domain = d; \
} while (0)
+struct smu_table_cache {
+ void *buffer;
+ size_t size;
+ /* interval in ms */
+ uint32_t interval;
+ unsigned long last_cache_time;
+};
+
struct smu_table {
uint64_t size;
uint32_t align;
@@ -257,6 +265,7 @@ struct smu_table {
void *cpu_addr;
struct amdgpu_bo *bo;
uint32_t version;
+ struct smu_table_cache cache;
};
enum smu_perf_level_designation {
@@ -322,6 +331,8 @@ enum smu_table_id {
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
SMU_TABLE_WIFIBAND,
+ SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ SMU_TABLE_BASEBOARD_TEMP_METRICS,
SMU_TABLE_COUNT,
};
@@ -396,6 +407,10 @@ struct smu_dpm_context {
struct smu_dpm_policy_ctxt *dpm_policies;
};
+struct smu_temp_context {
+ const struct smu_temp_funcs *temp_funcs;
+};
+
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
@@ -529,6 +544,7 @@ struct smu_context {
struct smu_table_context smu_table;
struct smu_dpm_context smu_dpm;
struct smu_power_context smu_power;
+ struct smu_temp_context smu_temp;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
@@ -624,6 +640,28 @@ struct smu_context {
struct i2c_adapter;
/**
+ * struct smu_temp_funcs - Callbacks used to get temperature data.
+ */
+struct smu_temp_funcs {
+ /**
+ * @get_temp_metrics: Get temperature metrics data for the specified type
+ * @type Temperature metrics type (baseboard/gpuboard)
+ * Return: Size of &table
+ */
+ ssize_t (*get_temp_metrics)(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table);
+
+ /**
+ * @temp_metrics_is_supported: Check whether the specified temperature metrics type is supported
+ * @type Temperature metrics type (baseboard/gpuboard)
+ * Return: true if supported else false
+ */
+ bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
+
+};
+
+/**
* struct pptable_funcs - Callbacks used to interact with the SMU.
*/
struct pptable_funcs {
@@ -1397,6 +1435,10 @@ struct pptable_funcs {
* @reset_vcn: message SMU to soft reset vcn instance.
*/
int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+ /**
+ * @reset_vcn_is_supported: Check whether VCN reset is supported.
+ */
+ bool (*reset_vcn_is_supported)(struct smu_context *smu);
/**
* @get_ecc_table: message SMU to get ECC INFO table.
@@ -1622,6 +1664,71 @@ typedef struct {
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type);
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+ default:
+ return SMU_TABLE_COUNT;
+ }
+
+ return SMU_TABLE_COUNT;
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+ unsigned long time)
+{
+ table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+ if (!table->cache.buffer || !table->cache.last_cache_time ||
+ !table->cache.interval || !table->cache.size ||
+ time_after(jiffies,
+ table->cache.last_cache_time +
+ msecs_to_jiffies(table->cache.interval)))
+ return false;
+
+ return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+ enum smu_table_id table_id, size_t size,
+ uint32_t cache_interval)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+ if (!tables[table_id].cache.buffer)
+ return -ENOMEM;
+
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = cache_interval;
+ tables[table_id].cache.size = size;
+
+ return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+ enum smu_table_id table_id)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ if (tables[table_id].cache.buffer) {
+ kfree(tables[table_id].cache.buffer);
+ tables[table_id].cache.buffer = NULL;
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = 0;
+ }
+}
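
An editorial sketch of the expected lifecycle of the cache helpers above, mirroring how the temperature tables use them later in this series; the function name and refresh step are illustrative assumptions.

static int example_cached_read(struct smu_context *smu, void *out, size_t len)
{
	struct smu_table *table =
		&smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];

	/* Assumes smu_table_cache_init() was already called for this table */
	if (!table->cache.buffer || len < table->cache.size)
		return -EINVAL;

	if (!smu_table_cache_is_valid(table)) {
		/* A real caller would refresh table->cache.buffer from firmware here */
		smu_table_cache_update_time(table, jiffies);
	}

	memcpy(out, table->cache.buffer, table->cache.size);
	return 0;
}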
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
@@ -1673,6 +1780,7 @@ int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index 0a2ca544f4e3..1c407a8e96ee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -135,7 +135,63 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x13
+typedef enum{
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum{
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_VDDCR_VDD0_TEMP,
+ SVI_VDDCR_VDD1_TEMP,
+ SVI_VDDCR_VDD2_TEMP,
+ SVI_VDDCR_VDD3_TEMP,
+ SVI_VDDCR_SOC_A_TEMP,
+ SVI_VDDCR_SOC_C_TEMP,
+ SVI_VDDCR_SOCIO_A_TEMP,
+ SVI_VDDCR_SOCIO_C_TEMP,
+ SVI_VDD_085_HBM_TEMP,
+ SVI_VDDCR_11_HBM_B_TEMP,
+ SVI_VDDCR_11_HBM_D_TEMP,
+ SVI_VDD_USR_TEMP,
+ SVI_VDDIO_11_E32_TEMP,
+ SVI_MAX_TEMP_ENTRIES, // 13
+} SVI_TEMP_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x14
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x0
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -231,11 +287,27 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t GfxclkBelowHostLimitThmAcc[8];
uint64_t GfxclkBelowHostLimitTotalAcc[8];
uint64_t GfxclkLowUtilizationAcc[8];
+
+ uint32_t AidTemperature[4];
+ uint32_t XcdTemperature[8];
+ uint32_t HbmTemperature[8];
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; // Defaults to 0.
+ uint16_t NodeIdentifier; // Unique identifier to each node on system.
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius
+ int16_t spare[3];
+} SystemMetricsTable_t;
+#pragma pack(pop)
+
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
index e1f490b6ce64..aff2776a8b6f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -116,7 +116,11 @@
#define PPSMC_MSG_DumpErrorRecord 0x57
#define PPSMC_MSG_EraseRasTable 0x58
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
+#define PPSMC_MSG_GetBadPageSeverity 0x5B
+#define PPSMC_MSG_GetSystemMetricsTable 0x5C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
+#define PPSMC_Message_Count 0x5E
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41f268313613..63a088ef7169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,9 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_MSG_ResetVCN 0x4E
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVCN 0x5B
+#define PPSMC_Message_Count 0x5C
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index d7a9e41820fa..2256c77da636 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -278,7 +278,8 @@
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
__SMU_DUMMY_MAP(ResetVCN), \
- __SMU_DUMMY_MAP(GetStaticMetricsTable),
+ __SMU_DUMMY_MAP(GetStaticMetricsTable), \
+ __SMU_DUMMY_MAP(GetSystemMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -469,6 +470,7 @@ enum smu_feature_mask {
/* Message category flags */
#define SMU_MSG_VF_FLAG (1U << 0)
#define SMU_MSG_RAS_PRI (1U << 1)
+#define SMU_MSG_NO_PRECHECK (1U << 2)
/* Firmware capability flags */
#define SMU_FW_CAP_RAS_PRI (1U << 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9ad46f545d15..599eddb5a67d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1897,7 +1897,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index c63d2e28954d..b067147b7c41 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1781,7 +1781,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 02a455a31c25..32fd0be05cff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -83,7 +83,6 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
};
-// clang-format off
const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -106,7 +105,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -138,8 +137,47 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 0),
};
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table_cache *cache;
+ int ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ sizeof(*baseboard_temp_metrics), 50);
+ if (ret)
+ return ret;
+	/* Initialize baseboard temperature metrics */
+ cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache);
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer;
+ smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache);
+ gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer;
+ smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0);
+
+ return 0;
+}
+
+void smu_v13_0_12_tables_fini(struct smu_context *smu)
+{
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+}
+
static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
@@ -184,7 +222,8 @@ static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu,
int smu_v13_0_12_get_max_metrics_size(void)
{
- return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
+ return max3(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t),
+ sizeof(SystemMetricsTable_t));
}
static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
@@ -220,7 +259,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t table_version;
- int ret, i;
+ int ret, i, n;
if (!pptable->Init) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -259,6 +298,22 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
/* use AID0 serial number by default */
pptable->PublicSerialNumber_AID =
static_metrics->PublicSerialNumber_AID[0];
+
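+	/* Publish the public serial numbers as unique IDs; AID0 also serves as the SOC id */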
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
if (ret)
return ret;
@@ -359,6 +414,243 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu,
+ void *metrics_table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export system metrics table!\n");
+ return ret;
+ }
+
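+	/* Invalidate the HDP cache so the CPU copy below sees the table the firmware just wrote */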
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ if (metrics_table)
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SystemMetricsTable_t));
+
+ return 0;
+}
+
+static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type)
+{
+ switch (type) {
+ case NODE_TEMP_RETIMER:
+ return AMDGPU_RETIMER_X_TEMP;
+ case NODE_TEMP_IBC_TEMP:
+ return AMDGPU_OAM_X_IBC_TEMP;
+ case NODE_TEMP_IBC_2_TEMP:
+ return AMDGPU_OAM_X_IBC_2_TEMP;
+ case NODE_TEMP_VDD18_VR_TEMP:
+ return AMDGPU_OAM_X_VDD18_VR_TEMP;
+ case NODE_TEMP_04_HBM_B_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_B_VR_TEMP;
+ case NODE_TEMP_04_HBM_D_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_D_VR_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type)
+{
+ switch (type) {
+ case SVI_VDDCR_VDD0_TEMP:
+ return AMDGPU_VDDCR_VDD0_TEMP;
+ case SVI_VDDCR_VDD1_TEMP:
+ return AMDGPU_VDDCR_VDD1_TEMP;
+ case SVI_VDDCR_VDD2_TEMP:
+ return AMDGPU_VDDCR_VDD2_TEMP;
+ case SVI_VDDCR_VDD3_TEMP:
+ return AMDGPU_VDDCR_VDD3_TEMP;
+ case SVI_VDDCR_SOC_A_TEMP:
+ return AMDGPU_VDDCR_SOC_A_TEMP;
+ case SVI_VDDCR_SOC_C_TEMP:
+ return AMDGPU_VDDCR_SOC_C_TEMP;
+ case SVI_VDDCR_SOCIO_A_TEMP:
+ return AMDGPU_VDDCR_SOCIO_A_TEMP;
+ case SVI_VDDCR_SOCIO_C_TEMP:
+ return AMDGPU_VDDCR_SOCIO_C_TEMP;
+ case SVI_VDD_085_HBM_TEMP:
+ return AMDGPU_VDD_085_HBM_TEMP;
+ case SVI_VDDCR_11_HBM_B_TEMP:
+ return AMDGPU_VDDCR_11_HBM_B_TEMP;
+ case SVI_VDDCR_11_HBM_D_TEMP:
+ return AMDGPU_VDDCR_11_HBM_D_TEMP;
+ case SVI_VDD_USR_TEMP:
+ return AMDGPU_VDD_USR_TEMP;
+ case SVI_VDDIO_11_E32_TEMP:
+ return AMDGPU_VDDIO_11_E32_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type)
+{
+ switch (type) {
+ case SYSTEM_TEMP_UBB_FPGA:
+ return AMDGPU_UBB_FPGA_TEMP;
+ case SYSTEM_TEMP_UBB_FRONT:
+ return AMDGPU_UBB_FRONT_TEMP;
+ case SYSTEM_TEMP_UBB_BACK:
+ return AMDGPU_UBB_BACK_TEMP;
+ case SYSTEM_TEMP_UBB_OAM7:
+ return AMDGPU_UBB_OAM7_TEMP;
+ case SYSTEM_TEMP_UBB_IBC:
+ return AMDGPU_UBB_IBC_TEMP;
+ case SYSTEM_TEMP_UBB_UFPGA:
+ return AMDGPU_UBB_UFPGA_TEMP;
+ case SYSTEM_TEMP_UBB_OAM1:
+ return AMDGPU_UBB_OAM1_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_HSC:
+ return AMDGPU_OAM_0_1_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_2_3_HSC:
+ return AMDGPU_OAM_2_3_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_HSC:
+ return AMDGPU_OAM_4_5_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_6_7_HSC:
+ return AMDGPU_OAM_6_7_HSC_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_0V72_VR:
+ return AMDGPU_UBB_FPGA_0V72_VR_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_3V3_VR:
+ return AMDGPU_UBB_FPGA_3V3_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR:
+ return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR:
+ return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_0V9_VR:
+ return AMDGPU_RETIMER_0_1_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_0V9_VR:
+ return AMDGPU_RETIMER_4_5_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_2_3_0V9_VR:
+ return AMDGPU_RETIMER_2_3_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_6_7_0V9_VR:
+ return AMDGPU_RETIMER_6_7_0V9_VR_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR:
+ return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR:
+ return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP;
+ case SYSTEM_TEMP_IBC_HSC:
+ return AMDGPU_IBC_HSC_TEMP;
+ case SYSTEM_TEMP_IBC:
+ return AMDGPU_IBC_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
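+		/* Baseboard temperatures are reported only by the primary node of an XGMI hive */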
+ if (smu->adev->gmc.xgmi.physical_node_id == 0 &&
+ smu->adev->gmc.xgmi.num_physical_nodes > 1 &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)))
+ return true;
+ break;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS));
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SystemMetricsTable_t *metrics =
+ (SystemMetricsTable_t *)smu_table->metrics_table;
+
+ struct smu_table *data_table;
+ int ret, sensor_type;
+ u32 idx, sensors;
+ ssize_t size;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD) {
+		/* Cached baseboard temperature metrics table */
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS];
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*baseboard_temp_metrics);
+ } else {
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];
+ gpuboard_temp_metrics =
+ (struct amdgpu_gpuboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+		size = sizeof(*gpuboard_temp_metrics);
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu, NULL);
+ if (ret)
+ return ret;
+
+ smu_table_cache_update_time(data_table, jiffies);
+
+ if (type == SMU_TEMP_METRIC_GPUBOARD) {
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
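+		/*
+		 * Each populated entry packs the sensor type into bits 31:24
+		 * and the temperature reading into bits 23:0; entries reading
+		 * -1 are skipped.
+		 */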
+ idx = 0;
+ for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->NodeTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_node_sensor_type(sensors);
+ gpuboard_temp_metrics->node_temp[idx] =
+ ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+
+ idx = 0;
+
+ for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->VrTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors);
+ gpuboard_temp_metrics->vr_temp[idx] =
+ ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ } else if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
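+		/* Same packed layout as the GPU board entries above */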
+ idx = 0;
+ for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) {
+ if (metrics->SystemTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_system_sensor_type(sensors);
+ baseboard_temp_metrics->system_temp[idx] =
+ ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF;
+ baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ }
+
+ memcpy(table, data_table->cache.buffer, size);
+
+ return size;
+}
+
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
{
const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
@@ -572,3 +864,8 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
return sizeof(*gpu_metrics);
}
+
+const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
+ .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v13_0_12_get_temp_metrics,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9cc294f4708b..e37b7b5358ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -145,7 +145,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -177,7 +177,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
- MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
// clang-format on
@@ -312,6 +312,8 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551800)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
@@ -350,6 +352,13 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
+
+ if (fw_ver >= 0x04560700) {
+ if (!amdgpu_sriov_vf(smu->adev))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ } else {
+ smu_v13_0_12_tables_fini(smu);
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,19 +411,37 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
- if ((pgm == 0 && fw_ver >= 0x00557F01) ||
- (pgm == 7 && fw_ver >= 0x7551000)) {
- smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
- smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+
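+		/* VF and bare-metal firmware gained these capabilities at different versions */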
+ if (amdgpu_sriov_vf(adev)) {
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ } else {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
- if ((pgm == 0 && fw_ver >= 0x00558000) ||
- (pgm == 7 && fw_ver >= 0x7551000))
- smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if (((pgm == 0) && (fw_ver >= 0x00558200)) ||
+ ((pgm == 4) && (fw_ver >= 0x04557100)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
@@ -511,8 +538,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
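+	/*
+	 * The allocations below are freed automatically (__free(kfree)) on any
+	 * early return; ownership is handed to smu_table via no_free_ptr()
+	 * only once everything has succeeded.
+	 */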
+ void *gpu_metrics_table __free(kfree) = NULL;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int gpu_metrcs_size = METRICS_TABLE_SIZE;
+ int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
@@ -528,27 +559,32 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- if (!smu_table->metrics_table)
+ metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
- smu_table->gpu_metrics_table =
+ gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!smu_table->gpu_metrics_table) {
- kfree(smu_table->metrics_table);
+ if (!gpu_metrics_table)
return -ENOMEM;
- }
- smu_table->driver_pptable =
- kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
- if (!smu_table->driver_pptable) {
- kfree(smu_table->metrics_table);
- kfree(smu_table->gpu_metrics_table);
+ driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
return -ENOMEM;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12)) {
+ ret = smu_v13_0_12_tables_init(smu);
+ if (ret)
+ return ret;
}
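+
+	/* All allocations succeeded; transfer ownership so the scoped cleanups above are skipped */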
+ smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table);
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
return 0;
}
@@ -677,6 +713,13 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ smu_v13_0_12_tables_fini(smu);
+ return smu_v13_0_fini_smc_tables(smu);
+}
+
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
@@ -803,7 +846,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
- int ret, i, retry = 100;
+ int ret, i, retry = 100, n;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
@@ -865,6 +908,23 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ GET_METRIC_FIELD(PublicSerialNumber_AID,
+ version)[i]);
+ }
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ GET_METRIC_FIELD(PublicSerialNumber_XCD,
+ version)[i]);
+ }
+
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -2560,9 +2620,9 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
@@ -2587,17 +2647,14 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
return -ENOMEM;
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
- goto out;
- }
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
+ metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2668,8 +2725,6 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
idx++;
}
}
-out:
- kfree(metrics_v0);
return sizeof(*xcp_metrics);
}
@@ -2680,31 +2735,26 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct gpu_metrics_v1_8 *gpu_metrics =
(struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
- ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
- kfree(metrics_v0);
- return num_bytes;
- }
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2890,7 +2940,6 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
*table = (void *)gpu_metrics;
- kfree(metrics_v0);
return sizeof(*gpu_metrics);
}
@@ -3076,7 +3125,7 @@ static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int var = (adev->pdev->device & 0xF);
- if (var == 0x1)
+ if (var == 0x0 || var == 0x1 || var == 0x3)
return true;
return false;
@@ -3152,6 +3201,11 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
+}
+
static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
@@ -3797,7 +3851,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
- .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
@@ -3840,6 +3894,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
+ .reset_vcn_is_supported = smu_v13_0_6_reset_vcn_is_supported,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -3857,3 +3912,9 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
+
+void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
+{
+ smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 67b30674fd31..bcb8246c0804 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -64,14 +64,17 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(VCN_RESET),
SMU_CAP(STATIC_METRICS),
SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(BOARD_VOLTAGE),
SMU_CAP(PLDM_VERSION),
+ SMU_CAP(TEMP_METRICS),
SMU_CAP(ALL),
};
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+extern void smu_v13_0_6_set_temp_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
@@ -86,6 +89,9 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
struct amdgpu_xcp *xcp, void *table,
void *smu_metrics);
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+void smu_v13_0_12_tables_fini(struct smu_context *smu);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 3aea32baea3d..f32474af90b3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
@@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit)
- *max_power_limit = msg_limit;
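+	/*
+	 * Scale the reported limits by the overdrive percentage range so the
+	 * min/max reflect the available OD headroom.
+	 */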
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ }
+ }
+
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
+
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
- if (min_power_limit)
- *min_power_limit = 0;
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 + od_percent_lower);
+ *min_power_limit /= 100;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 59f9abd0f7b8..f532f7c69259 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -256,11 +256,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
uint32_t flags, resp;
- bool fed_status;
+ bool fed_status, pri;
flags = __smu_cmn_get_msg_flags(smu, msg);
*poll = true;
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
/* When there is RAS fatal error, FW won't process non-RAS priority
* messages. Don't allow any messages other than RAS priority messages.
*/
@@ -272,15 +273,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
smu_get_message_name(smu, msg));
return -EACCES;
}
+ }
+ if (pri || fed_status) {
/* FW will ignore non-priority messages when a RAS fatal error
- * is detected. Hence it is possible that a previous message
- * wouldn't have got response. Allow to continue without polling
- * for response status for priority messages.
+		 * or reset condition is detected. Hence it is possible that a
+		 * previous message has not received a response. Allow priority
+		 * messages to be sent without polling for that response
+		 * status.
*/
resp = RREG32(smu->resp_reg);
dev_dbg(adev->dev,
- "Sending RAS priority message %s response status: %x",
+ "Sending priority message %s response status: %x",
smu_get_message_name(smu, msg), resp);
if (resp == 0)
*poll = false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a608cdbdada4..d588f74b98de 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -65,6 +65,32 @@
header->structure_size = sizeof(*tmp); \
} while (0)
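+/*
+ * Initialize a cached temperature-metrics table: fill the buffer with 0xFF
+ * (presumably marking every entry as not populated) and stamp the
+ * metrics_table_header revision and size fields.
+ */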
+#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b94..811265648a58 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level;
+ unsigned offset, i;
unsigned width, height, depth, size;
unsigned blocksize;
unsigned nbx, nby;
@@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
w0 = r600_mip_minify(w0, 0);
h0 = r600_mip_minify(h0, 0);
d0 = r600_mip_minify(d0, 0);
- for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ for (i = 0, offset = 0; i < nlevels; i++) {
width = r600_mip_minify(w0, i);
nbx = r600_fmt_get_nblocksx(format, width);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3e510327b7..9e35b14e2bf0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -554,7 +554,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* cover the whole aperture even if VRAM size is inferior to aperture size
* Novell bug 204882 + along with lots of ubuntu ones
*
- * Note 3: when limiting vram it's safe to overwritte real_vram_size because
+ * Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in case where real_vram_size is inferior to mc_vram_size (ie
* not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
* ones)
@@ -562,7 +562,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* Note 4: IGP TOM addr should be the same as the aperture addr, we don't
* explicitly check for that thought.
*
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * FIXME: when reducing VRAM size, align new size on power of 2.
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4dc77c398617..351b9dfcdad8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -926,10 +926,10 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+ ref_div_max = clamp(100 / post_div, 1u, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(den/post_div, 1u), ref_div_max);
+ *ref_div = clamp(den / post_div, 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4bb242437ff6..acd89a20f272 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -346,14 +346,14 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = vzalloc(array_size(sizeof(void *),
- rdev->gart.num_cpu_pages));
+ rdev->gart.pages = vcalloc(rdev->gart.num_cpu_pages,
+ sizeof(void *));
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
- rdev->gart.num_gpu_pages));
+ rdev->gart.pages_entry = vmalloc_array(rdev->gart.num_gpu_pages,
+ sizeof(uint64_t));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index c9fef9b61ced..818554e60537 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -455,7 +455,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
@@ -481,7 +481,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 2355a78e1b69..bdbc1bbe8a9b 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -86,7 +86,7 @@ int radeon_vce_init(struct radeon_device *rdev)
r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
if (r) {
- dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+ dev_err(rdev->dev, "radeon_vce: can't load firmware \"%s\"\n",
fw_name);
return r;
}
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
- /* we can only work with this fw version for now */
+ /* we can only work with these fw versions for now */
if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
@@ -281,7 +281,7 @@ static void radeon_vce_idle_work_handler(struct work_struct *work)
*
* @rdev: radeon_device pointer
*
- * Make sure VCE is powerd up when we want to use it
+ * Make sure VCE is powered up when we want to use it
*/
void radeon_vce_note_usage(struct radeon_device *rdev)
{