summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2026-04-02 13:00:58 +1000
committerDave Airlie <airlied@redhat.com>2026-04-02 13:17:38 +1000
commitcdd65e8bb9540b67d164cec760614ff74c560c49 (patch)
treed967c380d801fee53019763906b353380fda5959
parent9bdbf7eb25b3121ef19533df4fb70f2c39fc0d6a (diff)
parent8b3e8fa6d7bdab292447a43f70532db437d5d4f5 (diff)
Merge tag 'amd-drm-next-7.1-2026-04-01' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-7.1-2026-04-01: amdgpu: - UserQ fixes - PASID handling fix - S4 fix for smu11 chips - devcoredump fixes - RAS fixes - Misc small fixes - DCN 4.2 updates - DVI fixes - DML fixes - DC pipe validation fixes - eDP DSC seamless boot - DC FP rework - swsmu cleanups - GC 11.5.4 updates - Add DC idle state manager - Add support for using multiple engines for buffer fills and clears - Misc SMU7 fixes amdkfd: - Non-4K page fixes - Logging cleanups - sysfs fixes Signed-off-by: Dave Airlie <airlied@redhat.com> From: Alex Deucher <alexander.deucher@amd.com> Link: https://patch.msgid.link/20260401184456.3576660-1-alexander.deucher@amd.com
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c459
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c23
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c41
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c92
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c598
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.h151
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h63
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c581
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c20
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c23
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c17
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c18
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c22
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c32
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h136
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c15
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c123
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c83
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c60
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c5
303 files changed, 4153 insertions, 1892 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 7f4751e5caaf..cd9aa5b45e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -373,249 +373,280 @@ int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
return -ENODEV;
}
-int
-amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type,
int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index, i = 0;
+ int index;
u16 data_offset, size;
union igp_info *igp_info;
- union vram_info *vram_info;
- union umc_info *umc_info;
- union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
- u8 mem_vendor;
u32 mem_channel_number;
u32 mem_channel_width;
- u32 module_id;
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
integratedsysteminfo);
- else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ case 3:
+ mem_channel_number = igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
+ return -EINVAL;
}
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- switch (crev) {
- case 11:
- case 12:
- mem_channel_number = igp_info->v11.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v11.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
- break;
- case 2:
- switch (crev) {
- case 1:
- case 2:
- mem_channel_number = igp_info->v21.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v21.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- case 3:
- mem_channel_number = igp_info->v23.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v23.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
+
+ if (frev == 4) {
+ switch (crev) {
+ case 0:
+ mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
+ mem_type = le32_to_cpu(umc_info->v40.vram_type);
+ mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
+ mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
}
} else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
-
- if (frev == 4) {
- switch (crev) {
- case 0:
- mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
- mem_type = le32_to_cpu(umc_info->v40.vram_type);
- mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
- mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
- break;
- default:
- return -EINVAL;
- }
- } else
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index, i = 0;
+ u16 data_offset, size;
+ union vram_info *vram_info;
+ union vram_module *vram_module;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ vram_info = (union vram_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
+ if (frev == 3) {
+ switch (crev) {
+ /* v30 */
+ case 0:
+ vram_module = (union vram_module *)vram_info->v30.vram_module;
+ mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ mem_type = vram_info->v30.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_info->v30.channel_num;
+ mem_channel_width = vram_info->v30.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * 16;
break;
default:
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
-
- module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
- if (frev == 3) {
- switch (crev) {
- /* v30 */
- case 0:
- vram_module = (union vram_module *)vram_info->v30.vram_module;
- mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- mem_type = vram_info->v30.memory_type;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_info->v30.channel_num;
- mem_channel_width = vram_info->v30.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * 16;
- break;
- default:
- return -EINVAL;
- }
- } else if (frev == 2) {
- switch (crev) {
- /* v23 */
- case 3:
- if (module_id > vram_info->v23.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module *)vram_info->v23.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module->v9.vram_module_size);
- i++;
- }
- mem_type = vram_module->v9.memory_type;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module->v9.channel_num;
- mem_channel_width = vram_module->v9.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
- mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v24 */
- case 4:
- if (module_id > vram_info->v24.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module *)vram_info->v24.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module->v10.vram_module_size);
- i++;
- }
- mem_type = vram_module->v10.memory_type;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module->v10.channel_num;
- mem_channel_width = vram_module->v10.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
- mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v25 */
- case 5:
- if (module_id > vram_info->v25.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module *)vram_info->v25.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module->v11.vram_module_size);
- i++;
- }
- mem_type = vram_module->v11.memory_type;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module->v11.channel_num;
- mem_channel_width = vram_module->v11.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
- mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v26 */
- case 6:
- if (module_id > vram_info->v26.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module *)vram_info->v26.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module->v9.vram_module_size);
- i++;
- }
- mem_type = vram_module->v9.memory_type;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module->v9.channel_num;
- mem_channel_width = vram_module->v9.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
- mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- default:
- return -EINVAL;
- }
- } else {
- /* invalid frev */
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else if (frev == 2) {
+ switch (crev) {
+ /* v23 */
+ case 3:
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
}
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v24 */
+ case 4:
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v25 */
+ case 5:
+ if (module_id > vram_info->v25.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v25.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v11.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v11.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v11.channel_num;
+ mem_channel_width = vram_module->v11.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v26 */
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v26.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ default:
+ return -EINVAL;
}
+ } else {
+ /* invalid frev */
+ return -EINVAL;
}
+
+ } else {
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 67c8d105729b..0760e4510513 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -30,6 +30,10 @@ uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *ade
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index c048217615c1..b24d5d21be5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -908,9 +908,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- /* One fence for TTM and one for each CS job */
r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
- 1 + p->gang_size);
+ TTM_NUM_MOVE_FENCES + p->gang_size);
drm_exec_retry_on_contention(&p->exec);
if (unlikely(r))
goto out_free_user_pages;
@@ -920,7 +919,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_bo) {
r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
- 1 + p->gang_size);
+ TTM_NUM_MOVE_FENCES + p->gang_size);
drm_exec_retry_on_contention(&p->exec);
if (unlikely(r))
goto out_free_user_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 2b54a67437c2..fddf4e1252bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -35,6 +35,9 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
void amdgpu_coredump_init(struct amdgpu_device *adev)
{
}
+void amdgpu_coredump_fini(struct amdgpu_device *adev)
+{
+}
#else
#define AMDGPU_CORE_DUMP_SIZE_MAX (256 * 1024 * 1024)
@@ -192,12 +195,16 @@ static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev,
drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n",
adev->vpe.feature_version, adev->vpe.fw_version);
- drm_printf(p, "\nVBIOS Information\n");
- drm_printf(p, "vbios name : %s\n", ctx->name);
- drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
- drm_printf(p, "vbios version : %d\n", ctx->version);
- drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
- drm_printf(p, "vbios date : %s\n", ctx->date);
+ if (adev->bios) {
+ drm_printf(p, "\nVBIOS Information\n");
+ drm_printf(p, "vbios name : %s\n", ctx->name);
+ drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
+ drm_printf(p, "vbios version : %d\n", ctx->version);
+ drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
+ drm_printf(p, "vbios date : %s\n", ctx->date);
+	} else {
+ drm_printf(p, "\nVBIOS Information: NA\n");
+ }
}
static ssize_t
@@ -436,4 +443,10 @@ void amdgpu_coredump_init(struct amdgpu_device *adev)
{
INIT_WORK(&adev->coredump_work, amdgpu_devcoredump_deferred_work);
}
+
+void amdgpu_coredump_fini(struct amdgpu_device *adev)
+{
+ /* Finish deferred coredump formatting before HW/IP teardown. */
+ flush_work(&adev->coredump_work);
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
index b3582d0b4ca4..f8f2f4df129b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -50,4 +50,5 @@ struct amdgpu_coredump_info {
void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
bool vram_lost, struct amdgpu_job *job);
void amdgpu_coredump_init(struct amdgpu_device *adev);
+void amdgpu_coredump_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a7038f039b10..9c936519bb2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4225,6 +4225,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
if (pci_dev_is_disconnected(adev->pdev))
amdgpu_amdkfd_device_fini_sw(adev);
+ amdgpu_coredump_fini(adev);
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a44baa9ee78d..8ed637f92322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2683,8 +2683,12 @@ static int amdgpu_pmops_freeze(struct device *dev)
if (r)
return r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e2d32c29668a..bc772ca3dab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -404,6 +404,50 @@ void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
}
/**
+ * amdgpu_gart_map_gfx9_mqd - map mqd and ctrl_stack dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ *
+ * Map the MQD and control stack addresses into GART entries with the correct
+ * memory types on gfxv9. The MQD occupies the first 4KB and is followed by
+ * the control stack. The MQD uses UC (uncached) memory, while the control stack
+ * uses NC (non-coherent) memory.
+ */
+void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags)
+{
+ uint64_t page_base;
+ unsigned int i, j, t;
+ int idx;
+ uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
+ void *dst;
+
+ if (!adev->gart.ptr)
+ return;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ t = offset / AMDGPU_GPU_PAGE_SIZE;
+ dst = adev->gart.ptr;
+ for (i = 0; i < pages; i++) {
+ page_base = dma_addr[i];
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+ if ((i == 0) && (j == 0))
+ amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
+ else
+ amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, ctrl_flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
+ }
+ }
+ drm_dev_exit(idx);
+}
+
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index d3118275ddae..6ebd2da32ea6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -62,6 +62,8 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags,
void *dst);
+void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags);
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags);
void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 860a4405f7dd..ec74f3971732 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -34,6 +34,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_atomfirmware.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
@@ -1747,3 +1748,31 @@ int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
return 0;
}
+
+int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor)
+{
+ int ret = 0;
+
+ if (adev->flags & AMD_IS_APU)
+ return amdgpu_atomfirmware_get_integrated_system_info(adev,
+ vram_width, vram_type, vram_vendor);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ return amdgpu_atomfirmware_get_umc_info(adev,
+ vram_width, vram_type, vram_vendor);
+ case IP_VERSION(9, 5, 0):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 4, 3):
+ ret = amdgpu_atomfirmware_get_umc_info(adev,
+ vram_width, vram_type, vram_vendor);
+ if (vram_width && !ret)
+ *vram_width *= hweight32(adev->aid_mask);
+ return ret;
+ default:
+ return amdgpu_atomfirmware_get_vram_info(adev,
+ vram_width, vram_type, vram_vendor);
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b9fdc3276e81..32e73e8ba778 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -482,4 +482,6 @@ amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges);
+int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index d88523568b62..569c5a89ff10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -68,8 +68,11 @@ int amdgpu_pasid_alloc(unsigned int bits)
return -EINVAL;
spin_lock(&amdgpu_pasid_idr_lock);
+	/* TODO: Need to replace the idr with an xarray, and then
+ * handle the internal locking with ATOMIC safe paths.
+ */
pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1,
- 1U << bits, GFP_KERNEL);
+ 1U << bits, GFP_ATOMIC);
spin_unlock(&amdgpu_pasid_idr_lock);
if (pasid >= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 90352284c5ee..51ab1a332615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -44,6 +44,7 @@
#include <drm/display/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
#include "amdgpu_dm_irq_params.h"
+#include "amdgpu_dm_ism.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -486,6 +487,10 @@ struct amdgpu_crtc {
int deferred_flip_completion;
/* parameters access from DM IRQ handler */
struct dm_irq_params dm_irq_params;
+
+ /* DM idle state manager */
+ struct amdgpu_dm_ism ism;
+
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cc004830a8a2..27f7567df7bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1325,7 +1325,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
- r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
+ r = amdgpu_fill_buffer(amdgpu_ttm_next_clear_entity(adev),
+ abo, 0, &bo->base._resv,
&fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 44fba4b6aa92..cdf4909592d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1558,6 +1558,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int dev_var = adev->pdev->device & 0xF;
+ uint32_t vram_type = adev->gmc.vram_type;
int res;
if (amdgpu_ras_smu_eeprom_supported(adev))
@@ -1597,6 +1599,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return amdgpu_ras_eeprom_reset_table(control);
}
+ if (!(adev->flags & AMD_IS_APU) && (dev_var == 0x5) &&
+ (vram_type == AMDGPU_VRAM_TYPE_HBM3E) &&
+ (hdr->version < RAS_TABLE_VER_V3)) {
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
switch (hdr->version) {
case RAS_TABLE_VER_V2_1:
case RAS_TABLE_VER_V3:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b4ab309bf08a..afaaab6496de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -387,9 +387,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_ttm_buffer_entity *entity;
struct amdgpu_copy_mem src, dst;
struct dma_fence *fence = NULL;
int r;
+ u32 e;
src.bo = bo;
dst.bo = bo;
@@ -398,8 +400,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
src.offset = 0;
dst.offset = 0;
+ e = atomic_inc_return(&adev->mman.next_move_entity) %
+ adev->mman.num_move_entities;
+ entity = &adev->mman.move_entities[e];
+
r = amdgpu_ttm_copy_mem_to_mem(adev,
- &adev->mman.move_entity,
+ entity,
&src, &dst,
new_mem->size,
amdgpu_bo_encrypted(abo),
@@ -411,9 +417,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
-
- r = amdgpu_fill_buffer(&adev->mman.move_entity,
- abo, 0, NULL, &wipe_fence,
+ r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence,
AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
@@ -854,25 +858,15 @@ static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
uint64_t page_idx, pages_per_xcc;
int i;
- uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
pages_per_xcc = total_pages;
do_div(pages_per_xcc, num_xcc);
for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
- /* MQD page: use default flags */
- amdgpu_gart_bind(adev,
+ amdgpu_gart_map_gfx9_mqd(adev,
gtt->offset + (page_idx << PAGE_SHIFT),
- 1, &gtt->ttm.dma_address[page_idx], flags);
- /*
- * Ctrl pages - modify the memory type to NC (ctrl_flags) from
- * the second page of the BO onward.
- */
- amdgpu_gart_bind(adev,
- gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
- pages_per_xcc - 1,
- &gtt->ttm.dma_address[page_idx + 1],
- ctrl_flags);
+ pages_per_xcc, &gtt->ttm.dma_address[page_idx],
+ flags);
}
}
@@ -2345,8 +2339,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ u32 num_clear_entities, num_move_entities;
uint64_t size;
- int r;
+ int r, i, j;
if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
@@ -2361,6 +2356,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
+ num_clear_entities = 1;
+ num_move_entities = 1;
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
@@ -2373,36 +2370,64 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
- r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
- &adev->mman.clear_entity,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, 1);
- if (r < 0) {
- dev_err(adev->dev,
- "Failed setting up TTM BO clear entity (%d)\n", r);
+ adev->mman.clear_entities = kcalloc(num_clear_entities,
+ sizeof(struct amdgpu_ttm_buffer_entity),
+ GFP_KERNEL);
+ atomic_set(&adev->mman.next_clear_entity, 0);
+ if (!adev->mman.clear_entities)
goto error_free_default_entity;
+
+ adev->mman.num_clear_entities = num_clear_entities;
+
+ for (i = 0; i < num_clear_entities; i++) {
+ r = amdgpu_ttm_buffer_entity_init(
+ &adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
+
+ if (r < 0) {
+ for (j = 0; j < i; j++)
+ amdgpu_ttm_buffer_entity_fini(
+ &adev->mman.gtt_mgr, &adev->mman.clear_entities[j]);
+ kfree(adev->mman.clear_entities);
+ adev->mman.num_clear_entities = 0;
+ adev->mman.clear_entities = NULL;
+ goto error_free_default_entity;
+ }
}
- r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
- &adev->mman.move_entity,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, 2);
- if (r < 0) {
- dev_err(adev->dev,
- "Failed setting up TTM BO move entity (%d)\n", r);
- goto error_free_clear_entity;
+ adev->mman.num_move_entities = num_move_entities;
+ atomic_set(&adev->mman.next_move_entity, 0);
+ for (i = 0; i < num_move_entities; i++) {
+ r = amdgpu_ttm_buffer_entity_init(
+ &adev->mman.gtt_mgr,
+ &adev->mman.move_entities[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 2);
+
+ if (r < 0) {
+ for (j = 0; j < i; j++)
+ amdgpu_ttm_buffer_entity_fini(
+ &adev->mman.gtt_mgr, &adev->mman.move_entities[j]);
+ adev->mman.num_move_entities = 0;
+ goto error_free_clear_entities;
+ }
}
} else {
amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
&adev->mman.default_entity);
- amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
- &adev->mman.clear_entity);
- amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
- &adev->mman.move_entity);
+ for (i = 0; i < adev->mman.num_clear_entities; i++)
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
+ &adev->mman.clear_entities[i]);
+ for (i = 0; i < adev->mman.num_move_entities; i++)
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
+ &adev->mman.move_entities[i]);
/* Drop all the old fences since re-creating the scheduler entities
* will allocate new contexts.
*/
ttm_resource_manager_cleanup(man);
+ kfree(adev->mman.clear_entities);
+ adev->mman.clear_entities = NULL;
+ adev->mman.num_clear_entities = 0;
+ adev->mman.num_move_entities = 0;
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
@@ -2415,9 +2440,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
-error_free_clear_entity:
- amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
- &adev->mman.clear_entity);
+error_free_clear_entities:
+ for (i = 0; i < adev->mman.num_clear_entities; i++)
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
+ &adev->mman.clear_entities[i]);
+ kfree(adev->mman.clear_entities);
+ adev->mman.clear_entities = NULL;
+ adev->mman.num_clear_entities = 0;
error_free_default_entity:
amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
&adev->mman.default_entity);
@@ -2567,8 +2596,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
if (!fence)
return -EINVAL;
-
- entity = &adev->mman.clear_entity;
+ entity = &adev->mman.clear_entities[0];
*fence = dma_fence_get_stub();
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
@@ -2620,11 +2648,8 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
struct amdgpu_res_cursor dst;
int r;
- if (!adev->mman.buffer_funcs_enabled) {
- dev_err(adev->dev,
- "Trying to clear memory with ring turned off.\n");
+ if (!entity)
return -EINVAL;
- }
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
@@ -2660,6 +2685,20 @@ error:
return r;
}
+struct amdgpu_ttm_buffer_entity *
+amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev)
+{
+ struct amdgpu_mman *mman = &adev->mman;
+ u32 i;
+
+ if (mman->num_clear_entities == 0)
+ return NULL;
+
+ i = atomic_inc_return(&mman->next_clear_entity) %
+ mman->num_clear_entities;
+ return &mman->clear_entities[i];
+}
+
/**
* amdgpu_ttm_evict_resources - evict memory buffers
* @adev: amdgpu device object
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bf101215757e..3b1973611446 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -72,8 +72,12 @@ struct amdgpu_mman {
/* @default_entity: for workarounds, has no gart windows */
struct amdgpu_ttm_buffer_entity default_entity;
- struct amdgpu_ttm_buffer_entity clear_entity;
- struct amdgpu_ttm_buffer_entity move_entity;
+ struct amdgpu_ttm_buffer_entity *clear_entities;
+ atomic_t next_clear_entity;
+ u32 num_clear_entities;
+ struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
+ atomic_t next_move_entity;
+ u32 num_move_entities;
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
@@ -191,6 +195,7 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
struct dma_resv *resv,
struct dma_fence **f,
u64 k_job_id);
+struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 7f64b783954a..366728ed03e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -600,6 +600,13 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
goto unpin_bo;
}
+ /* Validate doorbell_offset is within the doorbell BO */
+ if ((u64)db_info->doorbell_offset * db_size + db_size >
+ amdgpu_bo_size(db_obj->obj)) {
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
db_info->doorbell_offset, db_size);
drm_dbg_driver(adev_to_drm(uq_mgr->adev),
@@ -997,6 +1004,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
unsigned long queue_id;
int ret = 0, r;
+ mutex_lock(&uq_mgr->userq_mutex);
/* Resume all the queues for this process */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
@@ -1012,6 +1020,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
ret = r;
}
+ mutex_unlock(&uq_mgr->userq_mutex);
if (ret)
drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
@@ -1215,23 +1224,21 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
struct dma_fence *ev_fence;
int ret;
- mutex_lock(&uq_mgr->userq_mutex);
ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
if (!dma_fence_is_signaled(ev_fence))
- goto unlock;
+ goto put_fence;
ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
- goto unlock;
+ goto put_fence;
}
ret = amdgpu_userq_restore_all(uq_mgr);
if (ret)
drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
-unlock:
- mutex_unlock(&uq_mgr->userq_mutex);
+put_fence:
dma_fence_put(ev_fence);
}
@@ -1454,17 +1461,19 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
+
adev->userq_halt_for_enforce_isolation = false;
+
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
r = amdgpu_userq_restore_helper(queue);
if (r)
ret = r;
- }
+ }
mutex_unlock(&uqm->userq_mutex);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 5cef8cd14148..e54295b56282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -328,11 +328,9 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
return r;
}
- r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
- if (r) {
- dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
+ if (r)
goto error_unlock;
- }
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev, rbo->flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 641fe91b4f03..3b32f41c3655 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -173,7 +173,7 @@ struct amdgpu_bo_vm;
#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
- AMDGPU_VA_RESERVED_SEQ64_SIZE)
-#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
+#define AMDGPU_VA_RESERVED_TRAP_SIZE (1ULL << 16)
#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
- AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 78d1f3eb522e..ae39b9e1f7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1722,6 +1722,20 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 7 &&
+ adev->gfx.mec_fw_version >= 5) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 2568eeaae945..fd691b2a6e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -767,7 +767,7 @@ static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 6349e239a367..e6db87b94eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -751,7 +751,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index f1079bd8cf00..6e184ea069ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -825,7 +825,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
gmc_v12_1_init_vram_info(adev);
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
adev->gmc.vram_type = vram_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 1ca0202cfdea..d865059e884a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1823,24 +1823,37 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
+static void gmc_v9_0_init_vram_info(struct amdgpu_device *adev)
{
static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ int dev_var = adev->pdev->device & 0xF;
u32 vram_info;
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
- adev->gmc.vram_width = 128 * 64;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
- adev->rev_id == 0x3)
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
-
- if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
+ if (adev->gmc.is_app_apu) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+ } else if (adev->flags & AMD_IS_APU) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ adev->gmc.vram_width = 64 * 64;
+ } else if (amdgpu_is_multi_aid(adev)) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ (dev_var == 0x5))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
}
@@ -1856,19 +1869,11 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (amdgpu_is_multi_aid(adev)) {
- gmc_v9_4_3_init_vram_info(adev);
- } else if (!adev->bios) {
- if (adev->flags & AMD_IS_APU) {
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
- adev->gmc.vram_width = 64 * 64;
- } else {
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
- adev->gmc.vram_width = 128 * 64;
- }
+ if (!adev->bios) {
+ gmc_v9_0_init_vram_info(adev);
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
- &vram_width, &vram_type, &vram_vendor);
+ r = amdgpu_gmc_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
if (amdgpu_sriov_vf(adev))
/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
* and DF related registers is not readable, seems hardcord is the
@@ -1896,6 +1901,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
}
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index 8c74894254f7..faac21ee5739 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -324,8 +324,10 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
2048);
- if (r)
+ if (r) {
+ kfree(compute_mqd);
goto free_mqd;
+ }
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
@@ -365,12 +367,16 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
shadow_info.shadow_size);
- if (r)
+ if (r) {
+ kfree(mqd_gfx_v11);
goto free_mqd;
+ }
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
shadow_info.csa_size);
- if (r)
+ if (r) {
+ kfree(mqd_gfx_v11);
goto free_mqd;
+ }
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
@@ -390,8 +396,10 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
}
r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
32);
- if (r)
+ if (r) {
+ kfree(mqd_sdma_v11);
goto free_mqd;
+ }
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 9aa988982304..fb7aaf5ae05c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -170,7 +170,8 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
int retry_loop;
/* For a reset done at the end of S3, only wait for TOS to be unloaded */
- if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ if ((adev->in_s4 || adev->in_s3) && !(adev->flags & AMD_IS_APU) &&
+ amdgpu_in_reset(adev))
return psp_v11_wait_for_tos_unload(psp);
for (retry_loop = 0; retry_loop < 20; retry_loop++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 73ce3d211ed6..8a9ba2276275 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -93,6 +93,11 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+
+ /* UVD doesn't work without DPM, it needs DPM to ungate it. */
+ if (!amdgpu_dpm)
+ return -ENOENT;
+
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
index 47947b94926b..0da7e1db55c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
@@ -144,9 +144,10 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf0_full %d, ttrace_buf1_full %d ttrace_utc_err %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
@@ -155,9 +156,10 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SA_ID),
@@ -167,9 +169,10 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
@@ -246,7 +249,8 @@ static bool event_interrupt_isr_v12_1(struct kfd_node *node,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (!kfd_irq_is_from_node(node, node_id, vmid)) {
- pr_debug("Interrupt not for Node, node_id: %d, vmid: %d\n", node_id, vmid);
+ dev_dbg_ratelimited(node->adev->dev,
+ "Interrupt not for Node, node_id: %d, vmid: %d\n", node_id, vmid);
return false;
}
@@ -266,9 +270,9 @@ static bool event_interrupt_isr_v12_1(struct kfd_node *node,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ dev_dbg(node->adev->dev, "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ dev_dbg(node->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
@@ -361,10 +365,10 @@ static void event_interrupt_wq_v12_1(struct kfd_node *node,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(node, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(node, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(node, pasid,
@@ -374,7 +378,7 @@ static void event_interrupt_wq_v12_1(struct kfd_node *node,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(node, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 10bc81ce37cb..964efa325908 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -136,7 +136,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
u64 size;
int r;
- entity = &adev->mman.move_entity;
+ entity = &adev->mman.move_entities[0];
mutex_lock(&entity->lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 979ae94ac966..e8f97de9d6e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -42,9 +42,16 @@ static uint64_t mqd_stride_v9(struct mqd_manager *mm,
struct queue_properties *q)
{
if (mm->dev->kfd->cwsr_enabled &&
- q->type == KFD_QUEUE_TYPE_COMPUTE)
- return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);
+ q->type == KFD_QUEUE_TYPE_COMPUTE) {
+
+ /* On gfxv9, the MQD resides in the first 4K page,
+ * followed by the control stack. Align both to
+ * AMDGPU_GPU_PAGE_SIZE to maintain the required 4K boundary.
+ */
+
+ return ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE);
+ }
return mm->mqd_size;
}
@@ -150,8 +157,8 @@ static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
if (!mqd_mem_obj)
return NULL;
retval = amdgpu_amdkfd_alloc_kernel_mem(node->adev,
- (ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
+ (ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE)) *
NUM_XCC(node->xcc_mask),
mqd_on_vram(node->adev) ? AMDGPU_GEM_DOMAIN_VRAM :
AMDGPU_GEM_DOMAIN_GTT,
@@ -359,7 +366,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
struct kfd_context_save_area_header header;
/* Control stack is located one page after MQD. */
- void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
+ void *mqd_ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE);
m = get_mqd(mqd);
@@ -399,7 +406,7 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
{
struct v9_mqd *m;
/* Control stack is located one page after MQD. */
- void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
+ void *ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE);
m = get_mqd(mqd);
@@ -445,7 +452,7 @@ static void restore_mqd(struct mqd_manager *mm, void **mqd,
*gart_addr = addr;
/* Control stack is located one page after MQD. */
- ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
+ ctl_stack = (void *)((uintptr_t)*mqd + AMDGPU_GPU_PAGE_SIZE);
memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);
m->cp_hqd_pq_doorbell_control =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 85f2bc3fbf85..fa025bea9b4f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -102,8 +102,8 @@
* The first chunk is the TBA used for the CWSR ISA code. The second
* chunk is used as TMA for user-mode trap handler setup in daisy-chain mode.
*/
-#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
-#define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048)
+#define KFD_CWSR_TBA_TMA_SIZE (AMDGPU_GPU_PAGE_SIZE * 2)
+#define KFD_CWSR_TMA_OFFSET (AMDGPU_GPU_PAGE_SIZE + 2048)
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
(KFD_MAX_NUM_OF_PROCESSES * \
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a031166f270c..bcd21204aa50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -679,7 +679,7 @@ static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
void kfd_procfs_del_queue(struct queue *q)
{
- if (!q)
+ if (!q || !q->process->kobj)
return;
kobject_del(&q->kobj);
@@ -858,6 +858,7 @@ int kfd_create_process_sysfs(struct kfd_process *process)
if (ret) {
pr_warn("Creating procfs pid directory failed");
kobject_put(process->kobj);
+ process->kobj = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index bbe869ceae3f..28354a4e5dd5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -249,10 +249,10 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev->node_props.gfx_target_version < 90000)
/* metadata_queue_size not supported on GFX7/GFX8 */
expected_queue_size =
- properties->queue_size / 2;
+ PAGE_ALIGN(properties->queue_size / 2);
else
expected_queue_size =
- properties->queue_size + properties->metadata_queue_size;
+ PAGE_ALIGN(properties->queue_size + properties->metadata_queue_size);
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
@@ -492,10 +492,11 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask);
wave_num = get_num_waves(props, gfxv, cu_num);
- wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE);
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props),
+ AMDGPU_GPU_PAGE_SIZE);
ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
- PAGE_SIZE);
+ AMDGPU_GPU_PAGE_SIZE);
if ((gfxv / 10000 * 10000) == 100000) {
/* HW design limits control stack size to 0x7000.
@@ -507,7 +508,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
props->ctl_stack_size = ctl_stack_size;
props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN);
- props->cwsr_size = ctl_stack_size + wg_data_size;
+ props->cwsr_size = ALIGN(ctl_stack_size + wg_data_size, PAGE_SIZE);
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 37fdcaf7192f..b120fdb0ef77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -628,9 +628,8 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
}
}
- r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(bo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
if (r) {
- pr_debug("failed %d to reserve bo\n", r);
amdgpu_bo_unreserve(bo);
goto reserve_bo_failed;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 8e949fe77312..89350aa9ca7e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -40,7 +40,8 @@ AMDGPUDM = \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
amdgpu_dm_wb.o \
- amdgpu_dm_colorop.o
+ amdgpu_dm_colorop.o \
+ amdgpu_dm_ism.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 205a7fab1037..21635e80349a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3283,6 +3283,7 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
mutex_lock(&dm->dc_lock);
+ amdgpu_dm_ism_disable(dm);
dc_allow_idle_optimizations(adev->dm.dc, false);
dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
@@ -3316,6 +3317,9 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
amdgpu_dm_irq_suspend(adev);
+ scoped_guard(mutex, &dm->dc_lock)
+ amdgpu_dm_ism_disable(dm);
+
hpd_rx_irq_work_suspend(dm);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
@@ -3606,6 +3610,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_resume(dm->dc);
+ amdgpu_dm_ism_enable(dm);
amdgpu_dm_irq_resume_early(adev);
for (i = 0; i < dc_state->stream_count; i++) {
@@ -3666,6 +3671,9 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* program HPD filter */
dc_resume(dm->dc);
+ scoped_guard(mutex, &dm->dc_lock)
+ amdgpu_dm_ism_enable(dm);
+
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
@@ -5581,6 +5589,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 6, 0):
+ case IP_VERSION(4, 2, 0):
replay_feature_enabled = true;
break;
@@ -9333,31 +9342,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (acrtc_state) {
timing = &acrtc_state->stream->timing;
- /*
- * Depending on when the HW latching event of double-buffered
- * registers happen relative to the PSR SDP deadline, and how
- * bad the Panel clock has drifted since the last ALPM off
- * event, there can be up to 3 frames of delay between sending
- * the PSR exit cmd to DMUB fw, and when the panel starts
- * displaying live frames.
- *
- * We can set:
- *
- * 20/100 * offdelay_ms = 3_frames_ms
- * => offdelay_ms = 5 * 3_frames_ms
- *
- * This ensures that `3_frames_ms` will only be experienced as a
- * 20% delay on top how long the display has been static, and
- * thus make the delay less perceivable.
- */
- if (acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
- offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
- timing->v_total *
- timing->h_total,
- timing->pix_clk_100hz);
- config.offdelay_ms = offdelay ?: 30;
- } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
!(adev->flags & AMD_IS_APU)) {
/*
@@ -9889,7 +9874,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
}
/* Decrement skip count when SR is enabled and we're doing fast updates. */
- if (acrtc_state->update_type <= UPDATE_TYPE_FAST &&
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
(psr->psr_feature_enabled || pr->config.replay_supported)) {
if (aconn->sr_skip_count > 0)
aconn->sr_skip_count--;
@@ -10099,7 +10084,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* fast updates.
*/
if (crtc->state->async_flip &&
- (acrtc_state->update_type > UPDATE_TYPE_FAST ||
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n",
@@ -10107,7 +10092,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
- acrtc_state->update_type <= UPDATE_TYPE_FAST &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 82727f6ec469..c3c588294665 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -124,37 +124,37 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
* - Enable condition same as above
* - Disable when vblank counter is enabled
*/
-static void amdgpu_dm_crtc_set_panel_sr_feature(
- struct vblank_control_work *vblank_work,
+void amdgpu_dm_crtc_set_panel_sr_feature(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream,
bool vblank_enabled, bool allow_sr_entry)
{
- struct dc_link *link = vblank_work->stream->link;
+ struct dc_link *link = stream->link;
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
- bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
- amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+ amdgpu_dm_crc_window_is_activated(&acrtc->base);
#endif
if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
- amdgpu_dm_replay_enable(vblank_work->stream, true);
+ amdgpu_dm_replay_enable(stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream, false);
+ amdgpu_dm_psr_disable(stream, false);
} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
- (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
+ (struct amdgpu_dm_connector *) stream->dm_stream_context;
if (!aconn->disallow_edp_enter_psr) {
- struct amdgpu_display_manager *dm = vblank_work->dm;
-
- amdgpu_dm_psr_enable(vblank_work->stream);
+ amdgpu_dm_psr_enable(stream);
if (dm->idle_workqueue &&
(dm->dc->config.disable_ips == DMUB_IPS_ENABLE) &&
dm->dc->idle_optimizations_allowed &&
@@ -251,33 +251,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
mutex_lock(&dm->dc_lock);
- if (vblank_work->enable)
+ if (vblank_work->enable) {
dm->active_vblank_irq_count++;
- else if (dm->active_vblank_irq_count)
- dm->active_vblank_irq_count--;
-
- if (dm->active_vblank_irq_count > 0)
- dc_allow_idle_optimizations(dm->dc, false);
-
- /*
- * Control PSR based on vblank requirements from OS
- *
- * If panel supports PSR SU, there's no need to disable PSR when OS is
- * submitting fast atomic commits (we infer this by whether the OS
- * requests vblank events). Fast atomic commits will simply trigger a
- * full-frame-update (FFU); a specific case of selective-update (SU)
- * where the SU region is the full hactive*vactive region. See
- * fill_dc_dirty_rects().
- */
- if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
- amdgpu_dm_crtc_set_panel_sr_feature(
- vblank_work, vblank_work->enable,
- vblank_work->acrtc->dm_irq_params.allow_sr_entry);
- }
-
- if (dm->active_vblank_irq_count == 0) {
- dc_post_update_surfaces_to_stream(dm->dc);
- dc_allow_idle_optimizations(dm->dc, true);
+ amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism,
+ DM_ISM_EVENT_EXIT_IDLE_REQUESTED);
+ } else {
+ if (dm->active_vblank_irq_count > 0)
+ dm->active_vblank_irq_count--;
+ amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism,
+ DM_ISM_EVENT_ENTER_IDLE_REQUESTED);
}
mutex_unlock(&dm->dc_lock);
@@ -476,6 +458,9 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ amdgpu_dm_ism_fini(&acrtc->ism);
drm_crtc_cleanup(crtc);
kfree(crtc);
}
@@ -685,7 +670,7 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
- dm_crtc_state->update_type > UPDATE_TYPE_FAST) {
+ dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
@@ -719,6 +704,35 @@ static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+/*
+ * This hysteresis filter as configured will:
+ *
+ * * Search through the latest 8[filter_history_size] entries in history,
+ * skipping entries that are older than [filter_old_history_threshold] frames
+ * (0 means ignore age)
+ * * Searches for short-idle-periods that lasted shorter than
+ * 4[filter_num_frames] frame-times
+ * * If there is at least 1[filter_entry_count] short-idle-period, then a delay
+ * of 4[activation_num_delay_frames] will be applied before allowing idle
+ * optimizations again.
+ * * An additional delay of 11[sso_num_frames] is applied before enabling
+ * panel-specific optimizations.
+ *
+ * The values were determined empirically on another OS, optimizing for Z8
+ * residency on APUs when running a productivity + web browsing test.
+ *
+ * TODO: Run similar tests to determine if these values are also optimal for
+ * Linux, and if each APU generation benefits differently.
+ */
+static struct amdgpu_dm_ism_config default_ism_config = {
+ .filter_num_frames = 4,
+ .filter_history_size = 8,
+ .filter_entry_count = 1,
+ .activation_num_delay_frames = 4,
+ .filter_old_history_threshold = 0,
+ .sso_num_frames = 11,
+};
+
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@@ -749,6 +763,8 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
if (res)
goto fail;
+ amdgpu_dm_ism_init(&acrtc->ism, &default_ism_config);
+
drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
/* Create (reset) the plane state */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index c1212947a77b..3a8094013a5d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -27,6 +27,12 @@
#ifndef __AMDGPU_DM_CRTC_H__
#define __AMDGPU_DM_CRTC_H__
+void amdgpu_dm_crtc_set_panel_sr_feature(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream,
+ bool vblank_enabled, bool allow_sr_entry);
+
void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c
new file mode 100644
index 000000000000..65a5cfe1e106
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/types.h>
+#include <drm/drm_vblank.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm_ism.h"
+#include "amdgpu_dm_crtc.h"
+#include "amdgpu_dm_trace.h"
+
+/**
+ * dm_ism_next_state - Get next state based on current state and event
+ * @current_state: state the FSM is currently in
+ * @event: event being committed to the FSM
+ * @next_state: written with the resulting state on a valid transition
+ *
+ * This function defines the idle state management FSM. Invalid transitions
+ * are ignored and will not progress the FSM.
+ *
+ * Return: true if (@current_state, @event) is a valid transition and
+ * @next_state was written, false otherwise.
+ */
+static bool dm_ism_next_state(enum amdgpu_dm_ism_state current_state,
+			      enum amdgpu_dm_ism_event event,
+			      enum amdgpu_dm_ism_state *next_state)
+{
+	switch (STATE_EVENT(current_state, event)) {
+	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING,
+			 DM_ISM_EVENT_ENTER_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_HYSTERESIS_WAITING;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING,
+			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_FULL_POWER_BUSY;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY,
+			 DM_ISM_EVENT_ENTER_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY,
+			 DM_ISM_EVENT_END_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
+			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_TIMER_ABORTED;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
+			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
+			 DM_ISM_EVENT_TIMER_ELAPSED):
+		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
+			 DM_ISM_EVENT_IMMEDIATE):
+		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY,
+			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_FULL_POWER_BUSY;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY,
+			 DM_ISM_EVENT_END_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_HYSTERESIS_WAITING;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
+			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
+			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
+			 DM_ISM_EVENT_SSO_TIMER_ELAPSED):
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
+			 DM_ISM_EVENT_IMMEDIATE):
+		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE_SSO;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO,
+			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
+		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+		break;
+	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO,
+			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
+		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
+		break;
+
+	case STATE_EVENT(DM_ISM_STATE_TIMER_ABORTED,
+			 DM_ISM_EVENT_IMMEDIATE):
+		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+		break;
+
+	default:
+		return false;
+	}
+	return true;
+}
+
+/**
+ * dm_ism_get_sso_delay - Compute the delay before enabling panel SSO
+ * @ism: idle state manager
+ * @stream: stream providing the active timing, may be NULL
+ *
+ * Return: the static-screen-optimization delay in nanoseconds, i.e.
+ * config->sso_num_frames frame times of @stream. Returns 0 when no stream
+ * is attached, when the SSO delay is disabled (sso_num_frames == 0), or
+ * when the timing carries no valid pixel clock.
+ */
+static uint64_t dm_ism_get_sso_delay(const struct amdgpu_dm_ism *ism,
+				     const struct dc_stream_state *stream)
+{
+	const struct amdgpu_dm_ism_config *config = &ism->config;
+	uint64_t one_frame_ns;
+
+	if (!stream)
+		return 0;
+
+	if (!config->sso_num_frames)
+		return 0;
+
+	/* Guard against a zero pixel clock to avoid a division by zero. */
+	if (!stream->timing.pix_clk_100hz)
+		return 0;
+
+	/*
+	 * Widen to 64 bit before multiplying v_total * h_total so the
+	 * intermediate product cannot overflow 32 bits.
+	 */
+	one_frame_ns = div64_u64((uint64_t)stream->timing.v_total *
+				 stream->timing.h_total * 10000000ull,
+				 stream->timing.pix_clk_100hz);
+
+	return config->sso_num_frames * one_frame_ns;
+}
+
+/**
+ * dm_ism_get_idle_allow_delay - Calculate hysteresis-based idle allow delay
+ * @ism: idle state manager
+ * @stream: stream providing the active timing, may be NULL
+ *
+ * Scans the recent idle history; if at least config->filter_entry_count of
+ * the recent records were "short" idle periods (shorter than
+ * config->filter_num_frames frame times), a delay of
+ * config->activation_num_delay_frames frame times is returned to be applied
+ * before allowing idle again.
+ *
+ * Return: the delay in nanoseconds, or 0 when filtering is disabled, no
+ * stream is attached, the timing carries no valid pixel clock, or not
+ * enough short idle periods were observed.
+ */
+static uint64_t dm_ism_get_idle_allow_delay(const struct amdgpu_dm_ism *ism,
+					    const struct dc_stream_state *stream)
+{
+	const struct amdgpu_dm_ism_config *config = &ism->config;
+	uint64_t one_frame_ns, short_idle_ns, old_hist_ns;
+	uint32_t history_size;
+	int pos;
+	uint32_t short_idle_count = 0;
+	uint64_t ret_ns = 0;
+
+	if (!stream)
+		return 0;
+
+	if (!config->filter_num_frames)
+		return 0;
+	if (!config->filter_entry_count)
+		return 0;
+	if (!config->activation_num_delay_frames)
+		return 0;
+
+	/* Guard against a zero pixel clock to avoid a division by zero. */
+	if (!stream->timing.pix_clk_100hz)
+		return 0;
+
+	/*
+	 * Widen to 64 bit before multiplying v_total * h_total so the
+	 * intermediate product cannot overflow 32 bits.
+	 */
+	one_frame_ns = div64_u64((uint64_t)stream->timing.v_total *
+				 stream->timing.h_total * 10000000ull,
+				 stream->timing.pix_clk_100hz);
+
+	short_idle_ns = config->filter_num_frames * one_frame_ns;
+	old_hist_ns = config->filter_old_history_threshold * one_frame_ns;
+
+	/*
+	 * Look back into the recent history and count how many times we entered
+	 * idle power state for a short duration of time
+	 */
+	history_size = min(
+		max(config->filter_history_size, config->filter_entry_count),
+		AMDGPU_DM_IDLE_HIST_LEN);
+	pos = ism->next_record_idx;
+
+	for (int k = 0; k < history_size; k++) {
+		/* Step backwards through the circular buffer, wrapping at 0. */
+		if (pos <= 0 || pos > AMDGPU_DM_IDLE_HIST_LEN)
+			pos = AMDGPU_DM_IDLE_HIST_LEN;
+		pos -= 1;
+
+		if (ism->records[pos].duration_ns <= short_idle_ns)
+			short_idle_count += 1;
+
+		if (short_idle_count >= config->filter_entry_count)
+			break;
+
+		/* Records older than the threshold no longer count. */
+		if (old_hist_ns > 0 &&
+		    ism->last_idle_timestamp_ns - ism->records[pos].timestamp_ns > old_hist_ns)
+			break;
+	}
+
+	if (short_idle_count >= config->filter_entry_count)
+		ret_ns = config->activation_num_delay_frames * one_frame_ns;
+
+	return ret_ns;
+}
+
+/*
+ * Append one idle-period record to the circular history buffer, wrapping
+ * the write index back to the start when it reaches the end (or repairing
+ * it if it is out of range). The record's duration is measured from the
+ * last idle timestamp to now.
+ */
+static void dm_ism_insert_record(struct amdgpu_dm_ism *ism)
+{
+	struct amdgpu_dm_ism_record *rec;
+	int idx = ism->next_record_idx;
+	unsigned long long now;
+
+	if (idx < 0 || idx >= AMDGPU_DM_IDLE_HIST_LEN)
+		idx = 0;
+
+	now = ktime_get_ns();
+	rec = &ism->records[idx];
+	rec->timestamp_ns = now;
+	rec->duration_ns = now - ism->last_idle_timestamp_ns;
+
+	ism->next_record_idx = idx + 1;
+}
+
+
+/* Record the moment idle was last requested; used to compute record durations. */
+static void dm_ism_set_last_idle_ts(struct amdgpu_dm_ism *ism)
+{
+	ism->last_idle_timestamp_ns = ktime_get_ns();
+}
+
+
+/*
+ * dm_ism_trigger_event - Run one FSM transition for @event
+ *
+ * On a valid transition, previous_state/current_state are updated and true
+ * is returned. Invalid transitions leave the FSM untouched and return false.
+ */
+static bool dm_ism_trigger_event(struct amdgpu_dm_ism *ism,
+				 enum amdgpu_dm_ism_event event)
+{
+	enum amdgpu_dm_ism_state next_state;
+	bool transitioned;
+
+	/* snake_case per kernel coding style (was camelCase) */
+	transitioned = dm_ism_next_state(ism->current_state, event,
+					 &next_state);
+	if (transitioned) {
+		ism->previous_state = ism->current_state;
+		ism->current_state = next_state;
+	}
+
+	return transitioned;
+}
+
+
+/**
+ * dm_ism_commit_idle_optimization_state - Apply the FSM's power decision to DC
+ * @ism: idle state manager
+ * @stream: stream whose panel self-refresh state is controlled, may be NULL
+ * @vblank_enabled: true if there is an active vblank requestor for this CRTC
+ * @allow_panel_sso: true to allow deep panel sleep (PSR1 / Replay SSO)
+ *
+ * Caller holds dm->dc_lock (asserted in amdgpu_dm_ism_commit_event()).
+ */
+static void dm_ism_commit_idle_optimization_state(struct amdgpu_dm_ism *ism,
+						  struct dc_stream_state *stream,
+						  bool vblank_enabled,
+						  bool allow_panel_sso)
+{
+	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
+	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
+	struct amdgpu_display_manager *dm = &adev->dm;
+	int r;
+
+	trace_amdgpu_dm_ism_commit(dm->active_vblank_irq_count,
+				   vblank_enabled,
+				   allow_panel_sso);
+
+	/*
+	 * If there is an active vblank requestor, or if SSO is being engaged,
+	 * then disallow idle optimizations.
+	 */
+	if (vblank_enabled || allow_panel_sso)
+		dc_allow_idle_optimizations(dm->dc, false);
+
+	/*
+	 * Control PSR based on vblank requirements from OS
+	 *
+	 * If panel supports PSR SU/Replay, there's no need to exit self-refresh
+	 * when OS is submitting fast atomic commits, as they can allow
+	 * self-refresh during vblank periods.
+	 */
+	if (stream && stream->link) {
+		/*
+		 * If allow_panel_sso is true when disabling vblank, allow
+		 * deeper panel sleep states such as PSR1 and Replay static
+		 * screen optimization.
+		 */
+		if (!vblank_enabled && allow_panel_sso) {
+			amdgpu_dm_crtc_set_panel_sr_feature(
+				dm, acrtc, stream, false,
+				acrtc->dm_irq_params.allow_sr_entry);
+		} else if (vblank_enabled) {
+			/* Make sure to exit SSO on vblank enable */
+			amdgpu_dm_crtc_set_panel_sr_feature(
+				dm, acrtc, stream, true,
+				acrtc->dm_irq_params.allow_sr_entry);
+		}
+		/*
+		 * Else, vblank_enabled == false and allow_panel_sso == false;
+		 * do nothing here.
+		 */
+	}
+
+	/*
+	 * Check for any active drm vblank requestors on other CRTCs
+	 * (dm->active_vblank_irq_count) before allowing HW-wide idle
+	 * optimizations.
+	 *
+	 * There's no need to have a "balanced" check when disallowing idle
+	 * optimizations at the start of this func -- we should disallow
+	 * whenever there's *an* active CRTC.
+	 */
+	if (!vblank_enabled && dm->active_vblank_irq_count == 0) {
+		dc_post_update_surfaces_to_stream(dm->dc);
+
+		r = amdgpu_dpm_pause_power_profile(adev, true);
+		if (r)
+			dev_warn(adev->dev, "failed to set default power profile mode\n");
+
+		dc_allow_idle_optimizations(dm->dc, true);
+
+		r = amdgpu_dpm_pause_power_profile(adev, false);
+		if (r)
+			dev_warn(adev->dev, "failed to restore the power profile mode\n");
+	}
+}
+
+
+/**
+ * dm_ism_dispatch_power_state - Run the side effects of a state change
+ * @ism: idle state manager
+ * @acrtc_state: current DM CRTC state (provides the stream)
+ * @event: the event that caused the transition
+ *
+ * Performs exit actions for @ism->previous_state (timer cancelation, history
+ * recording, disallowing idle) followed by entry actions for
+ * @ism->current_state (scheduling workers, committing the idle optimization
+ * state to DC).
+ *
+ * Return: a follow-up event to feed back into the FSM, or @event unchanged
+ * when no follow-up is needed.
+ */
+static enum amdgpu_dm_ism_event dm_ism_dispatch_power_state(
+	struct amdgpu_dm_ism *ism,
+	struct dm_crtc_state *acrtc_state,
+	enum amdgpu_dm_ism_event event)
+{
+	enum amdgpu_dm_ism_event ret = event;
+	const struct amdgpu_dm_ism_config *config = &ism->config;
+	uint64_t delay_ns, sso_delay_ns;
+
+	switch (ism->previous_state) {
+	case DM_ISM_STATE_HYSTERESIS_WAITING:
+		/*
+		 * Stop the timer if it was set, and we're not running from the
+		 * idle allow worker.
+		 */
+		if (ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE &&
+		    ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE_SSO)
+			cancel_delayed_work(&ism->delayed_work);
+		break;
+	case DM_ISM_STATE_OPTIMIZED_IDLE:
+		if (ism->current_state == DM_ISM_STATE_OPTIMIZED_IDLE_SSO)
+			break;
+		/* If idle disallow, cancel SSO work and insert record */
+		cancel_delayed_work(&ism->sso_delayed_work);
+		dm_ism_insert_record(ism);
+		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
+						      true, false);
+		break;
+	case DM_ISM_STATE_OPTIMIZED_IDLE_SSO:
+		/* Disable idle optimization */
+		dm_ism_insert_record(ism);
+		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
+						      true, false);
+		break;
+	default:
+		break;
+	}
+
+	switch (ism->current_state) {
+	case DM_ISM_STATE_HYSTERESIS_WAITING:
+		dm_ism_set_last_idle_ts(ism);
+
+		/* CRTC can be disabled; allow immediate idle */
+		if (!acrtc_state->stream) {
+			ret = DM_ISM_EVENT_IMMEDIATE;
+			break;
+		}
+
+		delay_ns = dm_ism_get_idle_allow_delay(ism,
+						       acrtc_state->stream);
+		if (delay_ns == 0) {
+			ret = DM_ISM_EVENT_IMMEDIATE;
+			break;
+		}
+
+		/* Schedule worker */
+		mod_delayed_work(system_unbound_wq, &ism->delayed_work,
+				 nsecs_to_jiffies(delay_ns));
+
+		break;
+	case DM_ISM_STATE_OPTIMIZED_IDLE:
+		sso_delay_ns = dm_ism_get_sso_delay(ism, acrtc_state->stream);
+		if (sso_delay_ns == 0)
+			ret = DM_ISM_EVENT_IMMEDIATE;
+		else if (config->sso_num_frames < config->filter_num_frames) {
+			/*
+			 * If sso_num_frames is less than hysteresis frames, it
+			 * indicates that allowing idle here, then disallowing
+			 * idle after sso_num_frames has expired, will likely
+			 * have a negative power impact. Skip idle allow here,
+			 * and let the sso_delayed_work handle it.
+			 */
+			mod_delayed_work(system_unbound_wq,
+					 &ism->sso_delayed_work,
+					 nsecs_to_jiffies(sso_delay_ns));
+		} else {
+			/* Enable idle optimization without SSO */
+			dm_ism_commit_idle_optimization_state(
+				ism, acrtc_state->stream, false, false);
+			mod_delayed_work(system_unbound_wq,
+					 &ism->sso_delayed_work,
+					 nsecs_to_jiffies(sso_delay_ns));
+		}
+		break;
+	case DM_ISM_STATE_OPTIMIZED_IDLE_SSO:
+		/* Enable static screen optimizations. */
+		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
+						      false, true);
+		break;
+	case DM_ISM_STATE_TIMER_ABORTED:
+		dm_ism_insert_record(ism);
+		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
+						      true, false);
+		ret = DM_ISM_EVENT_IMMEDIATE;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Human-readable event names for the amdgpu_dm_ism_event trace point. */
+static char *dm_ism_events_str[DM_ISM_NUM_EVENTS] = {
+	[DM_ISM_EVENT_IMMEDIATE] = "IMMEDIATE",
+	[DM_ISM_EVENT_ENTER_IDLE_REQUESTED] = "ENTER_IDLE_REQUESTED",
+	[DM_ISM_EVENT_EXIT_IDLE_REQUESTED] = "EXIT_IDLE_REQUESTED",
+	[DM_ISM_EVENT_BEGIN_CURSOR_UPDATE] = "BEGIN_CURSOR_UPDATE",
+	[DM_ISM_EVENT_END_CURSOR_UPDATE] = "END_CURSOR_UPDATE",
+	[DM_ISM_EVENT_TIMER_ELAPSED] = "TIMER_ELAPSED",
+	[DM_ISM_EVENT_SSO_TIMER_ELAPSED] = "SSO_TIMER_ELAPSED",
+};
+
+/* Human-readable state names for the amdgpu_dm_ism_event trace point. */
+static char *dm_ism_states_str[DM_ISM_NUM_STATES] = {
+	[DM_ISM_STATE_FULL_POWER_RUNNING] = "FULL_POWER_RUNNING",
+	[DM_ISM_STATE_FULL_POWER_BUSY] = "FULL_POWER_BUSY",
+	[DM_ISM_STATE_HYSTERESIS_WAITING] = "HYSTERESIS_WAITING",
+	[DM_ISM_STATE_HYSTERESIS_BUSY] = "HYSTERESIS_BUSY",
+	[DM_ISM_STATE_OPTIMIZED_IDLE] = "OPTIMIZED_IDLE",
+	[DM_ISM_STATE_OPTIMIZED_IDLE_SSO] = "OPTIMIZED_IDLE_SSO",
+	[DM_ISM_STATE_TIMER_ABORTED] = "TIMER_ABORTED",
+};
+
+
+/**
+ * amdgpu_dm_ism_commit_event - Feed one event into the ISM
+ * @ism: idle state manager
+ * @event: event to commit
+ *
+ * Runs the FSM, dispatching the side effects of each valid transition, and
+ * loops while dispatch produces follow-up events. Must be called with
+ * dm->dc_lock held.
+ */
+void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism,
+				enum amdgpu_dm_ism_event event)
+{
+	enum amdgpu_dm_ism_event next_event = event;
+	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
+	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+	/* ISM transitions must be called with mutex acquired */
+	ASSERT(mutex_is_locked(&dm->dc_lock));
+
+	if (!acrtc_state) {
+		trace_amdgpu_dm_ism_event(acrtc->crtc_id, "NO_STATE",
+					  "NO_STATE", "N/A");
+		return;
+	}
+
+	do {
+		bool transition = dm_ism_trigger_event(ism, event);
+
+		/* DM_ISM_NUM_EVENTS acts as the "no follow-up event" sentinel */
+		next_event = DM_ISM_NUM_EVENTS;
+		if (transition) {
+			trace_amdgpu_dm_ism_event(
+				acrtc->crtc_id,
+				dm_ism_states_str[ism->previous_state],
+				dm_ism_states_str[ism->current_state],
+				dm_ism_events_str[event]);
+			next_event = dm_ism_dispatch_power_state(
+				ism, acrtc_state, next_event);
+		} else {
+			/* Invalid transition: trace it, FSM stays put */
+			trace_amdgpu_dm_ism_event(
+				acrtc->crtc_id,
+				dm_ism_states_str[ism->current_state],
+				dm_ism_states_str[ism->current_state],
+				dm_ism_events_str[event]);
+		}
+
+		event = next_event;
+
+	} while (next_event < DM_ISM_NUM_EVENTS);
+}
+
+
+/* Hysteresis timer expiry: try to move the FSM into the optimized idle state. */
+static void dm_ism_delayed_work_func(struct work_struct *work)
+{
+	struct amdgpu_dm_ism *ism =
+		container_of(work, struct amdgpu_dm_ism, delayed_work.work);
+	struct amdgpu_device *adev =
+		drm_to_adev(ism_to_amdgpu_crtc(ism)->base.dev);
+
+	/* FSM transitions require the DC lock */
+	guard(mutex)(&adev->dm.dc_lock);
+
+	amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_TIMER_ELAPSED);
+}
+
+/* SSO timer expiry: try to move the FSM into the SSO idle state. */
+static void dm_ism_sso_delayed_work_func(struct work_struct *work)
+{
+	struct amdgpu_dm_ism *ism =
+		container_of(work, struct amdgpu_dm_ism, sso_delayed_work.work);
+	struct amdgpu_device *adev =
+		drm_to_adev(ism_to_amdgpu_crtc(ism)->base.dev);
+
+	/* FSM transitions require the DC lock */
+	guard(mutex)(&adev->dm.dc_lock);
+
+	amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_SSO_TIMER_ELAPSED);
+}
+
+/**
+ * amdgpu_dm_ism_disable - Disable the ISM
+ *
+ * @dm: The amdgpu display manager
+ *
+ * Disables ISM work on every CRTC: pending work is canceled, in-flight work
+ * is waited on, and further queueing is blocked. Each CRTC's FSM is then
+ * driven back towards DM_ISM_STATE_FULL_POWER_RUNNING.
+ */
+void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm)
+{
+	struct drm_crtc *crtc;
+
+	drm_for_each_crtc(crtc, dm->ddev) {
+		struct amdgpu_dm_ism *ism = &to_amdgpu_crtc(crtc)->ism;
+
+		/* Block and flush both workers before touching the FSM */
+		disable_delayed_work_sync(&ism->delayed_work);
+		disable_delayed_work_sync(&ism->sso_delayed_work);
+
+		/*
+		 * Leave the FSM in FULL_POWER_RUNNING; the EXIT_IDLE event
+		 * does not queue any new work.
+		 */
+		amdgpu_dm_ism_commit_event(ism,
+					   DM_ISM_EVENT_EXIT_IDLE_REQUESTED);
+	}
+}
+
+/**
+ * amdgpu_dm_ism_enable - enable the ISM
+ *
+ * @dm: The amdgpu display manager
+ *
+ * Re-arms the per-CRTC delayed work items that amdgpu_dm_ism_disable
+ * blocked, allowing the idle state manager to run again.
+ */
+void amdgpu_dm_ism_enable(struct amdgpu_display_manager *dm)
+{
+	struct drm_crtc *crtc;
+
+	drm_for_each_crtc(crtc, dm->ddev) {
+		struct amdgpu_dm_ism *ism = &to_amdgpu_crtc(crtc)->ism;
+
+		enable_delayed_work(&ism->delayed_work);
+		enable_delayed_work(&ism->sso_delayed_work);
+	}
+}
+
+/**
+ * amdgpu_dm_ism_init - Initialize the ISM for one CRTC
+ * @ism: idle state manager to initialize
+ * @config: tuning parameters, copied into @ism
+ *
+ * Starts the FSM in full power with an empty idle history and sets up the
+ * hysteresis and SSO delayed work items.
+ */
+void amdgpu_dm_ism_init(struct amdgpu_dm_ism *ism,
+			struct amdgpu_dm_ism_config *config)
+{
+	ism->config = *config;
+
+	ism->current_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+	ism->previous_state = DM_ISM_STATE_FULL_POWER_RUNNING;
+	ism->next_record_idx = 0;
+	ism->last_idle_timestamp_ns = 0;
+
+	INIT_DELAYED_WORK(&ism->delayed_work, dm_ism_delayed_work_func);
+	INIT_DELAYED_WORK(&ism->sso_delayed_work, dm_ism_sso_delayed_work_func);
+}
+
+
+/**
+ * amdgpu_dm_ism_fini - Tear down the ISM for one CRTC
+ * @ism: idle state manager to finalize
+ *
+ * Cancels any pending delayed work and waits for in-flight work to finish.
+ */
+void amdgpu_dm_ism_fini(struct amdgpu_dm_ism *ism)
+{
+	cancel_delayed_work_sync(&ism->sso_delayed_work);
+	cancel_delayed_work_sync(&ism->delayed_work);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.h
new file mode 100644
index 000000000000..fde0ddc8d4e4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_ISM_H__
+#define __AMDGPU_DM_ISM_H__
+
+#include <linux/workqueue.h>
+
+struct amdgpu_crtc;
+struct amdgpu_display_manager;
+
+#define AMDGPU_DM_IDLE_HIST_LEN 16
+
+/* States of the per-CRTC idle state management (ISM) finite state machine. */
+enum amdgpu_dm_ism_state {
+	DM_ISM_STATE_FULL_POWER_RUNNING,
+	DM_ISM_STATE_FULL_POWER_BUSY,
+	DM_ISM_STATE_HYSTERESIS_WAITING,
+	DM_ISM_STATE_HYSTERESIS_BUSY,
+	DM_ISM_STATE_OPTIMIZED_IDLE,
+	DM_ISM_STATE_OPTIMIZED_IDLE_SSO,
+	DM_ISM_STATE_TIMER_ABORTED,
+	DM_ISM_NUM_STATES,
+};
+
+/* Events that drive transitions of the ISM finite state machine. */
+enum amdgpu_dm_ism_event {
+	DM_ISM_EVENT_IMMEDIATE,
+	DM_ISM_EVENT_ENTER_IDLE_REQUESTED,
+	DM_ISM_EVENT_EXIT_IDLE_REQUESTED,
+	DM_ISM_EVENT_BEGIN_CURSOR_UPDATE,
+	DM_ISM_EVENT_END_CURSOR_UPDATE,
+	DM_ISM_EVENT_TIMER_ELAPSED,
+	DM_ISM_EVENT_SSO_TIMER_ELAPSED,
+	DM_ISM_NUM_EVENTS,
+};
+
+/* Pack a (state, event) pair into one value for switch-based dispatch. */
+#define STATE_EVENT(state, event) (((state) << 8) | (event))
+
+struct amdgpu_dm_ism_config {
+
+ /**
+ * @filter_num_frames: Idle periods shorter than this number of frames
+ * will be considered a "short idle period" for filtering.
+ *
+ * 0 indicates no filtering (i.e. no idle allow delay will be applied)
+ */
+ unsigned int filter_num_frames;
+
+ /**
+ * @filter_history_size: Number of recent idle periods to consider when
+ * counting the number of short idle periods.
+ */
+ unsigned int filter_history_size;
+
+ /**
+ * @filter_entry_count: When the number of short idle periods within
+ * recent &filter_history_size reaches this count, the idle allow delay
+ * will be applied.
+ *
+ * 0 indicates no filtering (i.e. no idle allow delay will be applied)
+ */
+ unsigned int filter_entry_count;
+
+ /**
+ * @activation_num_delay_frames: Defines the number of frames to wait
+ * for the idle allow delay.
+ *
+ * 0 indicates no filtering (i.e. no idle allow delay will be applied)
+ */
+ unsigned int activation_num_delay_frames;
+
+ /**
+ * @filter_old_history_threshold: A time-based restriction on top of
+ * &filter_history_size. Idle periods older than this threshold (in
+ * number of frames) will be ignored when counting the number of short
+ * idle periods.
+ *
+ * 0 indicates no time-based restriction, i.e. history is limited only
+ * by &filter_history_size.
+ */
+ unsigned int filter_old_history_threshold;
+
+ /**
+ * @sso_num_frames: Number of frames to delay before enabling static
+ * screen optimizations, such as PSR1 and Replay low HZ idle mode.
+ *
+ * 0 indicates immediate SSO enable upon allowing idle.
+ */
+ unsigned int sso_num_frames;
+};
+
+/* One entry in the idle-period history buffer. */
+struct amdgpu_dm_ism_record {
+	/**
+	 * @timestamp_ns: When idle was allowed
+	 */
+	unsigned long long timestamp_ns;
+
+	/**
+	 * @duration_ns: How long idle was allowed
+	 */
+	unsigned long long duration_ns;
+};
+
+/* Per-CRTC idle state manager; embedded in struct amdgpu_crtc as "ism". */
+struct amdgpu_dm_ism {
+	/* Tuning parameters, copied in at amdgpu_dm_ism_init() time. */
+	struct amdgpu_dm_ism_config config;
+	/* Timestamp taken when idle was last requested; record durations are measured from it. */
+	unsigned long long last_idle_timestamp_ns;
+
+	/* FSM state; previous_state is the state before the last valid transition. */
+	enum amdgpu_dm_ism_state current_state;
+	enum amdgpu_dm_ism_state previous_state;
+
+	/* Circular history of recent idle periods; next write goes to next_record_idx. */
+	struct amdgpu_dm_ism_record records[AMDGPU_DM_IDLE_HIST_LEN];
+	int next_record_idx;
+
+	/* Hysteresis (idle-allow) and SSO delay workers. */
+	struct delayed_work delayed_work;
+	struct delayed_work sso_delayed_work;
+};
+
+/* Resolve the embedding amdgpu_crtc from a pointer to its ism member. */
+#define ism_to_amdgpu_crtc(ism_ptr) \
+	container_of(ism_ptr, struct amdgpu_crtc, ism)
+
+void amdgpu_dm_ism_init(struct amdgpu_dm_ism *ism,
+ struct amdgpu_dm_ism_config *config);
+void amdgpu_dm_ism_fini(struct amdgpu_dm_ism *ism);
+void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism,
+ enum amdgpu_dm_ism_event event);
+void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm);
+void amdgpu_dm_ism_enable(struct amdgpu_display_manager *dm);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 54ae1c371511..81e43534ec59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -954,11 +954,9 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
return r;
}
- r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
- if (r) {
- drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
+ if (r)
goto error_unlock;
- }
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev, rbo->flags);
@@ -1374,8 +1372,16 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
/* turn off cursor */
if (crtc_state && crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
+ amdgpu_dm_ism_commit_event(
+ &amdgpu_crtc->ism,
+ DM_ISM_EVENT_BEGIN_CURSOR_UPDATE);
+
dc_stream_program_cursor_position(crtc_state->stream,
&position);
+
+ amdgpu_dm_ism_commit_event(
+ &amdgpu_crtc->ism,
+ DM_ISM_EVENT_END_CURSOR_UPDATE);
mutex_unlock(&adev->dm.dc_lock);
}
return;
@@ -1405,6 +1411,10 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
if (crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
+ amdgpu_dm_ism_commit_event(
+ &amdgpu_crtc->ism,
+ DM_ISM_EVENT_BEGIN_CURSOR_UPDATE);
+
if (!dc_stream_program_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
@@ -1412,6 +1422,10 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
if (!dc_stream_program_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
+
+ amdgpu_dm_ism_commit_event(
+ &amdgpu_crtc->ism,
+ DM_ISM_EVENT_END_CURSOR_UPDATE);
mutex_unlock(&adev->dm.dc_lock);
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index aa56fd6d56c3..e0fab8878d19 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -753,6 +753,69 @@ TRACE_EVENT(amdgpu_dm_brightness,
)
);
+TRACE_EVENT(amdgpu_dm_ism_commit,
+ TP_PROTO(
+ int active_vblank_irq_count,
+ bool vblank_enabled,
+ bool allow_panel_sso
+ ),
+ TP_ARGS(
+ active_vblank_irq_count,
+ vblank_enabled,
+ allow_panel_sso
+ ),
+ TP_STRUCT__entry(
+ __field(int, active_vblank_irq_count)
+ __field(bool, vblank_enabled)
+ __field(bool, allow_panel_sso)
+ ),
+ TP_fast_assign(
+ __entry->active_vblank_irq_count = active_vblank_irq_count;
+ __entry->vblank_enabled = vblank_enabled;
+ __entry->allow_panel_sso = allow_panel_sso;
+ ),
+ TP_printk(
+ "active_vblank_irq_count=%d vblank_enabled=%d allow_panel_sso=%d",
+ __entry->active_vblank_irq_count,
+ __entry->vblank_enabled,
+ __entry->allow_panel_sso
+ )
+);
+
+TRACE_EVENT(amdgpu_dm_ism_event,
+ TP_PROTO(
+ int crtc_id,
+ const char *prev_state,
+ const char *curr_state,
+ const char *event
+ ),
+ TP_ARGS(
+ crtc_id,
+ prev_state,
+ curr_state,
+ event
+ ),
+ TP_STRUCT__entry(
+ __field(int, crtc_id)
+ __string(prev_state, prev_state)
+ __string(curr_state, curr_state)
+ __string(event, event)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __assign_str(prev_state);
+ __assign_str(curr_state);
+ __assign_str(event);
+ ),
+ TP_printk(
+ "[CRTC %d] %s -> %s on event %s",
+ __entry->crtc_id,
+ __get_str(prev_state),
+ __get_str(curr_state),
+ __get_str(event))
+);
+
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index d9527c05fc87..110f0173eee6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -106,11 +106,9 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
return r;
}
- r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
- if (r) {
- drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
+ if (r)
goto error_unlock;
- }
domain = amdgpu_display_supported_domains(adev, rbo->flags);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index e46f8ce41d87..8ba9b4f56f87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -53,12 +53,31 @@ inline void dc_assert_fp_enabled(void)
{
int depth;
- depth = __this_cpu_read(fpu_recursion_depth);
+ depth = this_cpu_read(fpu_recursion_depth);
ASSERT(depth >= 1);
}
 /**
+ * dc_is_fp_enabled - Check if FPU protection is enabled
+ *
+ * This function tells if the code is already under FPU protection or not. A
+ * function that works as an API for a set of FPU operations can use this
+ * function for checking if the caller invoked it after DC_FP_START(). For
+ * example, take a look at dcn20_fpu.c file.
+ *
+ * Similar to dc_assert_fp_enabled, but does not assert, returns status instead.
+ */
+inline bool dc_is_fp_enabled(void)
+{
+	int depth;
+
+	depth = this_cpu_read(fpu_recursion_depth);
+
+	return (depth >= 1);
+}
+
+/**
* dc_fpu_begin - Enables FPU protection
* @function_name: A string containing the function name for debug purposes
* (usually __func__)
@@ -77,7 +96,7 @@ void dc_fpu_begin(const char *function_name, const int line)
WARN_ON_ONCE(!in_task());
preempt_disable();
- depth = __this_cpu_inc_return(fpu_recursion_depth);
+ depth = this_cpu_inc_return(fpu_recursion_depth);
if (depth == 1) {
BUG_ON(!kernel_fpu_available());
kernel_fpu_begin();
@@ -100,7 +119,7 @@ void dc_fpu_end(const char *function_name, const int line)
{
int depth;
- depth = __this_cpu_dec_return(fpu_recursion_depth);
+ depth = this_cpu_dec_return(fpu_recursion_depth);
if (depth == 0) {
kernel_fpu_end();
} else {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
index 4e921632bc4e..5e95419d3798 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
@@ -28,15 +28,30 @@
#define __DC_FPU_H__
void dc_assert_fp_enabled(void);
+bool dc_is_fp_enabled(void);
void dc_fpu_begin(const char *function_name, const int line);
void dc_fpu_end(const char *function_name, const int line);
#ifndef _LINUX_FPU_COMPILATION_UNIT
#define DC_FP_START() dc_fpu_begin(__func__, __LINE__)
#define DC_FP_END() dc_fpu_end(__func__, __LINE__)
+#ifdef CONFIG_DRM_AMD_DC_FP
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) \
+ do { \
+ bool dc_fp_enabled = dc_is_fp_enabled(); \
+ if (dc_fp_enabled) \
+ DC_FP_END(); \
+ code; \
+ if (dc_fp_enabled) \
+ DC_FP_START(); \
+ } while (0)
+#else
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !CONFIG_DRM_AMD_DC_FP
#else
#define DC_FP_START() BUILD_BUG()
#define DC_FP_END() BUILD_BUG()
-#endif
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !_LINUX_FPU_COMPILATION_UNIT
#endif /* __DC_FPU_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 8c54c02a0e26..f37a43f4172e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -2010,10 +2010,10 @@ static void calculate_bandwidth(
}
/*output link bit per pixel supported*/
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
- data->output_bpphdmi[k] = bw_def_na;
- data->output_bppdp4_lane_hbr[k] = bw_def_na;
- data->output_bppdp4_lane_hbr2[k] = bw_def_na;
- data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+ data->output_bpphdmi[k] = (uint32_t)bw_def_na;
+ data->output_bppdp4_lane_hbr[k] = (uint32_t)bw_def_na;
+ data->output_bppdp4_lane_hbr2[k] = (uint32_t)bw_def_na;
+ data->output_bppdp4_lane_hbr3[k] = (uint32_t)bw_def_na;
if (data->enable[k]) {
data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 6073cadde76c..fa10f85df3db 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -503,6 +503,7 @@ struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value,
unsigned int integer_bits,
unsigned int fractional_bits)
{
+ (void)integer_bits;
struct fixed31_32 fixpt_value = dc_fixpt_from_int(int_value);
fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index a8b750ff8573..e8736c134b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -56,6 +56,7 @@ static bool dal_vector_presized_costruct(struct vector *vector,
void *initial_value,
uint32_t struct_size)
{
+ (void)ctx;
uint32_t i;
vector->container = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index f947f82013c6..dd362071a6c9 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -1963,7 +1963,7 @@ static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
count = (le16_to_cpu(header->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
/ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
- if (count < record->sucI2cId.bfI2C_LineMux)
+ if (count <= record->sucI2cId.bfI2C_LineMux)
return BP_RESULT_BADBIOSTABLE;
/* get the GPIO_I2C_INFO */
@@ -2696,6 +2696,7 @@ static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
struct slot_layout_info *slot_layout_info,
unsigned int record_offset)
{
+ (void)i;
unsigned int j;
struct bios_parser *bp;
ATOM_BRACKET_LAYOUT_RECORD *record;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 94fddf22f5a9..a1c08e1cc411 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -205,6 +205,7 @@ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
struct graphics_object_id object_id, uint32_t index,
struct graphics_object_id *src_object_id)
{
+ (void)index;
struct bios_parser *bp = BP_FROM_DCB(dcb);
unsigned int i;
enum bp_result bp_result = BP_RESULT_BADINPUT;
@@ -765,6 +766,7 @@ static enum bp_result bios_parser_get_device_tag(
uint32_t device_tag_index,
struct connector_device_tag_info *info)
{
+ (void)device_tag_index;
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct atom_display_object_path_v2 *object;
@@ -809,6 +811,7 @@ static enum bp_result get_ss_info_v4_1(
uint32_t index,
struct spread_spectrum_info *ss_info)
{
+ (void)index;
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
struct atom_smu_info_v3_3 *smu_info = NULL;
@@ -897,6 +900,7 @@ static enum bp_result get_ss_info_v4_2(
uint32_t index,
struct spread_spectrum_info *ss_info)
{
+ (void)index;
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
struct atom_smu_info_v3_1 *smu_info = NULL;
@@ -977,6 +981,7 @@ static enum bp_result get_ss_info_v4_5(
uint32_t index,
struct spread_spectrum_info *ss_info)
{
+ (void)index;
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_5 *disp_cntl_tbl = NULL;
@@ -1604,6 +1609,8 @@ static uint32_t bios_parser_get_ss_entry_number(
struct dc_bios *dcb,
enum as_signal_type signal)
{
+ (void)dcb;
+ (void)signal;
/* TODO: DAL2 atomfirmware implementation does not need this.
* why DAL3 need this?
*/
@@ -3536,6 +3543,8 @@ static uint16_t bios_parser_pack_data_tables(
struct dc_bios *dcb,
void *dst)
{
+ (void)dcb;
+ (void)dst;
// TODO: There is data bytes alignment issue, disable it for now.
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f2b1720a6a66..17ef515c6c69 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -783,6 +783,8 @@ static enum bp_result external_encoder_control_v3(
struct bios_parser *bp,
struct bp_external_encoder_control *cntl)
{
+ (void)bp;
+ (void)cntl;
/* TODO */
return BP_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
index 3099128223df..cec61c9d7263 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -94,6 +94,7 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
+ (void)id;
/* On any ASIC after DCE80, we manually program the DIG_FE
* selection (see connect_dig_be_to_fe function of the link
* encoder), so translation should always return 0 (no FE).
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 349f0e5d5856..478465fba224 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -93,6 +93,7 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
+ (void)id;
/* On any ASIC after DCE80, we manually program the DIG_FE
* selection (see connect_dig_be_to_fe function of the link
* encoder), so translation should always return 0 (no FE).
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
index 1a5fefcde8af..6b8a87f2c49e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -91,6 +91,7 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
+ (void)id;
/* On any ASIC after DCE80, we manually program the DIG_FE
* selection (see connect_dig_be_to_fe function of the link
* encoder), so translation should always return 0 (no FE).
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 5a5249f3ffbd..880bce368238 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -78,6 +78,7 @@ int clk_mgr_helper_get_active_plane_cnt(
struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
int i, total_plane_count;
total_plane_count = 0;
@@ -97,7 +98,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- int edp_num;
+ unsigned int edp_num;
unsigned int panel_inst;
dc_get_edp_links(dc, edp_links, &edp_num);
@@ -123,7 +124,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- int edp_num;
+ unsigned int edp_num;
unsigned int panel_inst;
dc_get_edp_links(dc, edp_links, &edp_num);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index d50b9440210e..cd4c45516616 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -92,7 +92,7 @@ static int determine_sclk_from_bounding_box(
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
{
uint8_t j;
- uint32_t min_vertical_blank_time = -1;
+ uint32_t min_vertical_blank_time = (uint32_t)-1;
for (j = 0; j < context->stream_count; j++) {
struct dc_stream_state *stream = context->streams[j];
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index e18097f82091..09e83097a623 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -740,7 +740,8 @@ void rn_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3600000;
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index b48522480dfd..dcec9d0f8c34 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -421,10 +421,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
/* Refresh bounding box */
- DC_FP_START();
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
- DC_FP_END();
}
static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
@@ -523,6 +521,7 @@ void dcn3_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ (void)pp_smu;
struct clk_state_registers_and_bypass s = { 0 };
clk_mgr->base.ctx = ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 7aee02d56292..57ba7bc4d16e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -733,11 +733,12 @@ void vg_clk_mgr_construct(
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
vg_bw_params.wm_table = lpddr5_wm_table;
- } else {
+ else
vg_bw_params.wm_table = ddr4_wm_table;
- }
+
/* Saved clocks configured at boot for debug purposes */
vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 051052bd10c9..89fc482947ef 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -329,6 +329,9 @@ bool dcn31_are_clock_states_equal(struct dc_clocks *a,
static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)regs_and_bypass;
+ (void)clk_mgr_base;
+ (void)log_info;
return;
}
@@ -725,11 +728,12 @@ void dcn31_clk_mgr_construct(
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn31_bw_params.wm_table = lpddr5_wm_table;
- } else {
+ else
dcn31_bw_params.wm_table = ddr5_wm_table;
- }
+
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 0cb37827a62b..b08a70a2f571 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -395,6 +395,9 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)regs_and_bypass;
+ (void)clk_mgr_base;
+ (void)log_info;
return;
}
@@ -842,7 +845,8 @@ void dcn314_clk_mgr_construct(
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn314_bw_params.wm_table = lpddr5_wm_table;
else
dcn314_bw_params.wm_table = ddr5_wm_table;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index c49268db85f6..3a651c1a866d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -247,6 +247,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)regs_and_bypass;
+ (void)clk_mgr_base;
+ (void)log_info;
return;
}
@@ -652,11 +655,12 @@ void dcn315_clk_mgr_construct(
if (clk_mgr->base.smu_ver > 0)
clk_mgr->base.smu_present = true;
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn315_bw_params.wm_table = lpddr5_wm_table;
- } else {
+ else
dcn315_bw_params.wm_table = ddr5_wm_table;
- }
+
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 1769b1f26e75..e9d492d8c8d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -255,6 +255,9 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)regs_and_bypass;
+ (void)clk_mgr_base;
+ (void)log_info;
return;
}
@@ -636,11 +639,12 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn316_bw_params.wm_table = lpddr5_wm_table;
- } else {
+ else
dcn316_bw_params.wm_table = ddr4_wm_table;
- }
+
/* Saved clocks configured at boot for debug purposes */
dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 2856b0337e87..fda6cade30a8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -872,6 +872,7 @@ static uint32_t dcn32_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mg
static void dcn32_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)log_info;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dprefclk_did = 0;
uint32_t dcfclk_did = 0;
@@ -1059,11 +1060,9 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
if (!clk_mgr->dpm_present)
dcn32_patch_dpm_table(clk_mgr_base->bw_params);
- DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
- DC_FP_END();
}
static bool dcn32_are_clock_states_equal(struct dc_clocks *a,
@@ -1147,6 +1146,7 @@ void dcn32_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ (void)pp_smu;
struct clk_log_info log_info = {0};
clk_mgr->base.ctx = ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 6fc524752613..2798088842f4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -1464,11 +1464,12 @@ void dcn35_clk_mgr_construct(
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ if (ctx->dc_bios->integrated_info &&
+ ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
dcn35_bw_params.wm_table = lpddr5_wm_table;
- } else {
+ else
dcn35_bw_params.wm_table = ddr5_wm_table;
- }
+
/* Saved clocks configured at boot for debug purposes */
dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 03464f21d119..82c1a55a2271 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -333,6 +333,7 @@ bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ (void)log_info;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dprefclk_did = 0;
uint32_t dcfclk_did = 0;
@@ -525,6 +526,7 @@ static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr
struct dc_state *context,
int ref_dtbclk_khz)
{
+ (void)ref_dtbclk_khz;
int i;
struct dccg *dccg = clk_mgr->dccg;
struct pipe_ctx *otg_master;
@@ -614,6 +616,7 @@ static void dcn401_update_clocks_update_dentist(
struct clk_mgr_internal *clk_mgr,
struct dc_state *context)
{
+ (void)context;
uint32_t new_disp_divider = 0;
uint32_t new_dispclk_wdivider = 0;
uint32_t dentist_dispclk_wdivider_readback = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
index b4c6522e922c..ec888aed207d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
@@ -43,8 +43,6 @@
#define DC_LOGGER_INIT(logger) \
struct dal_logger *dc_logger = logger
-#define DCN42_CLKIP_REFCLK 48000
-
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@@ -160,6 +158,9 @@ void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context,
int ref_dtbclk_khz)
{
+ (void)clk_mgr;
+ (void)context;
+ (void)ref_dtbclk_khz;
/* DCN42 does not implement set_dtbclk_dto function, so this is a no-op */
}
@@ -255,6 +256,10 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
dcn42_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
+ /* Only attempt to enable dtbclk if currently disabled AND new state requests it.
+ * For dcn42b (no dtbclk hardware), init_clk_states sets dtbclk_en=false and
+ * new_clocks->dtbclk_en should always be false, so this block never executes.
+ */
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
int actual_dtbclk = 0;
@@ -326,7 +331,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
- if (!dc->debug.disable_dtb_ref_clk_switch &&
+ if (!dc->debug.disable_dtb_ref_clk_switch && new_clocks->dtbclk_en &&
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
dcn42_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
@@ -519,7 +524,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
-static void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn42_smu_dpm_clks *smu_dpm_clks)
{
DpmClocks_t_dcn42 *table = smu_dpm_clks->dpm_clks;
@@ -833,6 +838,7 @@ void dcn42_set_low_power_state(struct clk_mgr *clk_mgr_base)
void dcn42_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
+ (void)clk_mgr_base;
}
@@ -842,7 +848,7 @@ static void dcn42_init_clocks_fpga(struct clk_mgr *clk_mgr)
}
-static void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr,
+void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
@@ -895,13 +901,13 @@ static void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr,
// Both fclk and ref_dppclk run on the same scemi clock.
clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
- /* TODO: set dtbclk in correct place */
- clk_mgr->clks.dtbclk_en = true;
-
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
+ if (clk_mgr->clks.dtbclk_en) {
+ dcn42_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
+ } else {
+ clk_mgr->clks.ref_dtbclk_khz = 0;
+ }
dcn42_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);
-
- dcn42_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}
unsigned int dcn42_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
@@ -933,8 +939,8 @@ unsigned int dcn42_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type
 	return 0;
 }
 
-static int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
+int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
 {
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dispclk_wdivider;
int disp_divider;
@@ -954,7 +961,7 @@ bool dcn42_is_smu_present(struct clk_mgr *clk_mgr_base)
return clk_mgr->smu_present;
}
-static void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int)
+void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int)
{
struct clk_mgr *clk_mgr_base = &clk_mgr_int->base;
struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
index 5ad027a9edaf..9568ca06f00f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
@@ -27,6 +27,7 @@
#include "clk_mgr_internal.h"
#define NUM_CLOCK_SOURCES 5
+#define DCN42_CLKIP_REFCLK 48000
struct dcn42_watermarks;
@@ -71,9 +72,14 @@ void dcn42_set_low_power_state(struct clk_mgr *clk_mgr_base);
 void dcn42_exit_low_power_state(struct clk_mgr *clk_mgr_base);
 unsigned int dcn42_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
 bool dcn42_is_smu_present(struct clk_mgr *clk_mgr_base);
+bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
 int dcn42_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context, int *all_active_disps);
void dcn42_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, bool safe_to_lower);
void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, int ref_dtbclk_khz);
bool dcn42_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
-bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
+struct dcn42_smu_dpm_clks; /* Forward declaration for pointer parameter below */
+void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, struct dcn42_smu_dpm_clks *smu_dpm_clks);
+void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int);
+void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower);
+int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base);
#endif //__DCN42_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7dac3f35f0e8..419f894c87b0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1096,11 +1096,8 @@ static bool dc_construct(struct dc *dc,
#ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
- if (dc->res_pool->funcs->update_bw_bounding_box) {
- DC_FP_START();
+ if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
- DC_FP_END();
- }
dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
if (!dc->soc_and_ip_translator)
goto fail;
@@ -1137,6 +1134,8 @@ static void disable_all_writeback_pipes_for_stream(
struct dc_stream_state *stream,
struct dc_state *context)
{
+ (void)dc;
+ (void)context;
int i;
for (i = 0; i < stream->num_wb_info; i++)
@@ -1148,6 +1147,8 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
struct dc_stream_state *stream,
bool lock)
{
+ (void)dc;
+ (void)context;
int i;
/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
@@ -1563,8 +1564,7 @@ static void detect_edp_presence(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
enum dc_connection_type type;
- int i;
- int edp_num;
+ unsigned int i, edp_num;
dc_get_edp_links(dc, edp_links, &edp_num);
if (!edp_num)
@@ -1923,10 +1923,77 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- /* block DSC for now, as VBIOS does not currently support DSC timings */
if (crtc_timing->flags.DSC) {
- DC_LOG_DEBUG("boot timing validation failed due to DSC\n");
- return false;
+ struct display_stream_compressor *dsc = NULL;
+ struct dcn_dsc_state dsc_state = {0};
+
+ /* Find DSC associated with this timing generator */
+ if (tg_inst < dc->res_pool->res_cap->num_dsc) {
+ dsc = dc->res_pool->dscs[tg_inst];
+ }
+
+ if (!dsc || !dsc->funcs->dsc_read_state) {
+ DC_LOG_DEBUG("boot timing validation failed due to no DSC resource or read function\n");
+ return false;
+ }
+
+ /* Read current DSC hardware state */
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ /* Check if DSC is actually enabled in hardware */
+ if (dsc_state.dsc_clock_en == 0) {
+ DC_LOG_DEBUG("boot timing validation failed due to DSC not enabled in hardware\n");
+ return false;
+ }
+
+ uint32_t num_slices_h = 0;
+ uint32_t num_slices_v = 0;
+
+ if (dsc_state.dsc_slice_width > 0) {
+ num_slices_h = (crtc_timing->h_addressable + dsc_state.dsc_slice_width - 1) / dsc_state.dsc_slice_width;
+ }
+
+ if (dsc_state.dsc_slice_height > 0) {
+ num_slices_v = (crtc_timing->v_addressable + dsc_state.dsc_slice_height - 1) / dsc_state.dsc_slice_height;
+ }
+
+ if (crtc_timing->dsc_cfg.num_slices_h != num_slices_h) {
+ DC_LOG_DEBUG("boot timing validation failed due to num_slices_h mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.num_slices_v != num_slices_v) {
+ DC_LOG_DEBUG("boot timing validation failed due to num_slices_v mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.bits_per_pixel != dsc_state.dsc_bits_per_pixel) {
+ DC_LOG_DEBUG("boot timing validation failed due to bits_per_pixel mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.block_pred_enable != dsc_state.dsc_block_pred_enable) {
+ DC_LOG_DEBUG("boot timing validation failed due to block_pred_enable mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.linebuf_depth != dsc_state.dsc_line_buf_depth) {
+ DC_LOG_DEBUG("boot timing validation failed due to linebuf_depth mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.version_minor != dsc_state.dsc_version_minor) {
+ DC_LOG_DEBUG("boot timing validation failed due to version_minor mismatch\n");
+ return false;
+ }
+
+ if (crtc_timing->dsc_cfg.ycbcr422_simple != dsc_state.dsc_simple_422) {
+ DC_LOG_DEBUG("boot timing validation failed due to pixel encoding mismatch\n");
+ return false;
+ }
+
+ // Skip checks for is_frl, is_dp, and rc_buffer_size which are not programmed by vbios
+ // or not necessary for seamless boot validation.
}
if (dc_is_dp_signal(link->connector_signal)) {
@@ -2695,7 +2762,7 @@ static bool is_surface_in_context(
static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
return update_type;
@@ -2787,7 +2854,7 @@ static struct surface_update_descriptor get_scaling_info_update_type(
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
return update_type;
@@ -2838,11 +2905,11 @@ static struct surface_update_descriptor get_scaling_info_update_type(
return update_type;
}
-static struct surface_update_descriptor check_update_surface(
+static struct surface_update_descriptor det_surface_update(
const struct dc_check_config *check_config,
struct dc_surface_update *u)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
if (u->surface->force_full_update) {
@@ -2862,7 +2929,7 @@ static struct surface_update_descriptor check_update_surface(
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
@@ -2876,43 +2943,27 @@ static struct surface_update_descriptor check_update_surface(
if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
- }
-
- if (u->cursor_csc_color_matrix) {
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
@@ -2928,10 +2979,9 @@ static struct surface_update_descriptor check_update_surface(
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type,
- check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
- LOCK_DESCRIPTOR_STREAM);
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
@@ -2961,6 +3011,7 @@ static struct surface_update_descriptor check_update_surface(
*/
static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count)
{
+ (void)dc;
bool has_flip_immediate_plane = false;
int i;
@@ -2985,7 +3036,7 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
int surface_count,
struct dc_stream_update *stream_update)
{
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
/* When countdown finishes, promote this flip to full to trigger deferred final transition */
if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
@@ -3052,18 +3103,7 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
if (su_flags->raw)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- /* Non-global cases */
- if (stream_update->hdr_static_metadata ||
- stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket ||
- stream_update->vsp_infopacket ||
- stream_update->hfvsif_infopacket ||
- stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket ||
- stream_update->avi_infopacket) {
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
- }
-
+ /* Non-global cases */
if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
@@ -3073,32 +3113,11 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
su_flags->bits.out_tf = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
-
- if (stream_update->periodic_interrupt) {
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
- }
-
- if (stream_update->dither_option) {
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
- }
-
- if (stream_update->cursor_position || stream_update->cursor_attributes) {
- elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
- }
-
- /* TODO - cleanup post blend CM */
- if (stream_update->func_shaper || stream_update->lut3d_func) {
- elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
- }
-
- if (stream_update->pending_test_pattern) {
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
}
for (int i = 0 ; i < surface_count; i++) {
struct surface_update_descriptor inner_type =
- check_update_surface(check_config, &updates[i]);
+ det_surface_update(check_config, &updates[i]);
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
@@ -3125,81 +3144,6 @@ struct surface_update_descriptor dc_check_update_surfaces_for_stream(
return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
-/*
- * check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full)
- *
- * This function performs checks on the DC global state, and is therefore not re-entrant. It
- * should not be called from DM.
- *
- * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
- */
-static struct surface_update_descriptor check_update_state_and_surfaces_for_stream(
- const struct dc *dc,
- const struct dc_check_config *check_config,
- const struct dc_stream_state *stream,
- const struct dc_surface_update *updates,
- const int surface_count,
- const struct dc_stream_update *stream_update)
-{
- const struct dc_state *context = dc->current_state;
-
- struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE};
-
- if (updates)
- for (int i = 0; i < surface_count; i++)
- if (!is_surface_in_context(context, updates[i].surface))
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
-
- if (stream) {
- const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
- if (dc->idle_optimizations_allowed)
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
-
- if (dc_can_clear_cursor_limit(dc))
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
-
- return overall_type;
-}
-
-/*
- * dc_check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full)
- *
- * This function performs checks on the DC global state, stream and surface update, and is
- * therefore not re-entrant. It should not be called from DM.
- *
- * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
- */
-static struct surface_update_descriptor dc_check_update_state_and_surfaces_for_stream(
- const struct dc *dc,
- const struct dc_check_config *check_config,
- struct dc_stream_state *stream,
- struct dc_surface_update *updates,
- int surface_count,
- struct dc_stream_update *stream_update)
-{
- /* check updates against the entire DC state (global) first */
- struct surface_update_descriptor overall_update_type = check_update_state_and_surfaces_for_stream(
- dc,
- check_config,
- stream,
- updates,
- surface_count,
- stream_update);
-
- /* check updates for stream and plane */
- struct surface_update_descriptor stream_update_type = dc_check_update_surfaces_for_stream(
- check_config,
- updates,
- surface_count,
- stream_update);
- elevate_update_type(&overall_update_type, stream_update_type.update_type, stream_update_type.lock_descriptor);
-
- return overall_update_type;
-}
-
static struct dc_stream_status *stream_get_status(
struct dc_state *ctx,
struct dc_stream_state *stream)
@@ -3346,6 +3290,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
struct dc_stream_state *stream,
struct dc_stream_update *update)
{
+ (void)context;
struct dc_context *dc_ctx = dc->ctx;
if (update == NULL || stream == NULL)
@@ -3556,6 +3501,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
@@ -3614,6 +3566,7 @@ static void restore_minimal_pipe_split_policy(struct dc *dc,
* @surface_count: surface update count
* @stream: Corresponding stream to be updated
* @stream_update: stream update
+ *
* @new_update_type: [out] determined update type by the function
* @new_context: [out] new context allocated and validated if update type is
* FULL, reference to current context if update type is less than FULL.
@@ -3625,11 +3578,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
- struct surface_update_descriptor *update_descriptor,
+ enum surface_update_type *new_update_type,
struct dc_state **new_context)
{
struct dc_state *context;
int i, j;
+ enum surface_update_type update_type;
const struct dc_stream_status *stream_status;
struct dc_context *dc_ctx = dc->ctx;
@@ -3643,20 +3597,17 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- *update_descriptor = dc_check_update_state_and_surfaces_for_stream(
- dc,
- &dc->check_config,
- stream,
- srf_updates,
- surface_count,
- stream_update);
+ update_type = dc_check_update_surfaces_for_stream(
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
*/
force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
- if (update_descriptor->update_type == UPDATE_TYPE_FULL)
+ if (update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
@@ -3682,7 +3633,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
- if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
+ if (update_type == UPDATE_TYPE_FULL) {
if (stream_update) {
uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
@@ -3692,13 +3643,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
- if (update_descriptor->update_type >= update_surface_trace_level)
+ if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
for (i = 0; i < surface_count; i++)
copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
- if (update_descriptor->update_type >= UPDATE_TYPE_FULL) {
+ if (update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
for (i = 0; i < surface_count; i++)
@@ -3736,7 +3687,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_descriptor->update_type != UPDATE_TYPE_MED)
+ if (update_type != UPDATE_TYPE_MED)
continue;
if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -3750,7 +3701,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
- if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
+ if (update_type == UPDATE_TYPE_FULL) {
struct pipe_split_policy_backup policy;
bool minimize = false;
@@ -3779,7 +3730,8 @@ static bool update_planes_and_stream_state(struct dc *dc,
update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
- if (update_descriptor->update_type == UPDATE_TYPE_FULL)
+ *new_update_type = update_type;
+ if (update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
@@ -3859,7 +3811,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
program_cursor_position(dc, stream);
/* Full fe update*/
- if (update_type <= UPDATE_TYPE_FAST)
+ if (update_type == UPDATE_TYPE_FAST)
continue;
if (stream_update->dsc_config)
@@ -3943,6 +3895,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
+ (void)dc;
if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
|| stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
&& stream->ctx->dce_version >= DCN_VERSION_3_1)
@@ -4168,7 +4121,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
- bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
if (should_lock_all_pipes)
determine_pipe_unlock_order(dc, context);
@@ -4228,7 +4181,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type <= UPDATE_TYPE_FAST &&
+ if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4285,7 +4238,7 @@ static void commit_planes_for_stream(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
@@ -4302,7 +4255,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
- if (update_type > UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4364,7 +4317,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_inbox1_lock(dc, stream->link)) {
@@ -4435,7 +4388,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
- if (update_type > UPDATE_TYPE_FAST)
+ if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
@@ -4451,7 +4404,7 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
- if (update_type > UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -4479,7 +4432,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type <= UPDATE_TYPE_FAST &&
+ if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4506,7 +4459,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
- if (update_type <= UPDATE_TYPE_FAST)
+ if (update_type == UPDATE_TYPE_FAST)
continue;
stream_status =
@@ -4525,7 +4478,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
- if (update_type <= UPDATE_TYPE_FAST)
+ if (update_type == UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
@@ -4536,7 +4489,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (dc->hwss.program_front_end_for_ctx && update_type > UPDATE_TYPE_FAST) {
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
//Pipe busy until some frame and line #
@@ -4564,7 +4517,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
// Update Type FAST, Surface updates
- if (update_type <= UPDATE_TYPE_FAST) {
+ if (update_type == UPDATE_TYPE_FAST) {
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -4601,7 +4554,7 @@ static void commit_planes_for_stream(struct dc *dc,
srf_updates[i].cm->flags.bits.lut3d_enable &&
srf_updates[i].cm->flags.bits.lut3d_dma_enable &&
dc->hwss.trigger_3dlut_dma_load)
- dc->hwss.trigger_3dlut_dma_load(pipe_ctx);
+ dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
@@ -4621,7 +4574,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
@@ -4654,13 +4607,13 @@ static void commit_planes_for_stream(struct dc *dc,
/* If enabling subvp or transitioning from subvp->subvp, enable the
* phantom streams before we program front end for the phantom pipes.
*/
- if (update_type > UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST) {
if (dc->hwss.enable_phantom_streams)
dc->hwss.enable_phantom_streams(dc, context);
}
}
- if (update_type > UPDATE_TYPE_FAST)
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (subvp_prev_use && !subvp_curr_use) {
@@ -4673,7 +4626,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.disable_phantom_streams(dc, context);
}
- if (update_type > UPDATE_TYPE_FAST)
+ if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
@@ -4743,6 +4696,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
int surface_count,
bool *is_plane_addition)
{
+ (void)srf_updates;
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
@@ -5145,12 +5099,198 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
+void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ int i = 0;
+
+ if (stream_update) {
+ fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+ fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ } else {
+ fast_update[0].out_transfer_func = NULL;
+ fast_update[0].output_csc_transform = NULL;
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ fast_update[i].flip_addr = srf_updates[i].flip_addr;
+ fast_update[i].gamma = srf_updates[i].gamma;
+ fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+ fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+ fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
+#if defined(CONFIG_DRM_AMD_DC_DCN4_2)
+ fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
+#endif
+ }
+}
+
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].flip_addr ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].input_csc_color_matrix ||
+ fast_update[i].cursor_csc_color_matrix ||
+#if defined(CONFIG_DRM_AMD_DC_DCN4_2)
+ fast_update[i].cm_hist_control ||
+#endif
+ fast_update[i].coeff_reduction_factor)
+ return true;
+ }
+
+ return false;
+}
+
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].input_csc_color_matrix ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].coeff_reduction_factor ||
+#if defined(CONFIG_DRM_AMD_DC_DCN4_2)
+ fast_update[i].cm_hist_control ||
+#endif
+ fast_update[i].cursor_csc_color_matrix)
+ return true;
+ }
+
+ return false;
+}
+
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ (void)stream_update;
+ const struct dc_state *context = dc->current_state;
+ if (srf_updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
+
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ const union dc_plane_cm_flags blend_only_flags = {
+ .bits = {
+ .blend_enable = 1,
+ }
+ };
+
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
+ if (srf_updates &&
+ (srf_updates[i].plane_info ||
+ srf_updates[i].scaling_info ||
+ (srf_updates[i].hdr_mult.value &&
+ srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) || /* compare this update's own surface, not element 0 */
+ (srf_updates[i].sdr_white_level_nits &&
+ srf_updates[i].sdr_white_level_nits != srf_updates[i].surface->sdr_white_level_nits) || /* same: per-index surface */
+ srf_updates[i].in_transfer_func ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ (srf_updates[i].cm &&
+ ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
+ (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
+ return true;
+ }
+
+ if (stream_update &&
+ (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update) ||
+ stream_update->hdr_static_metadata ||
+ stream_update->abm_level ||
+ stream_update->periodic_interrupt ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
+ stream_update->dpms_off ||
+ stream_update->allow_freesync ||
+ stream_update->vrr_active_variable ||
+ stream_update->vrr_active_fixed ||
+ stream_update->gamut_remap ||
+ stream_update->output_color_space ||
+ stream_update->dither_option ||
+ stream_update->wb_update ||
+ stream_update->dsc_config ||
+ stream_update->mst_bw_update ||
+ stream_update->func_shaper ||
+ stream_update->lut3d_func ||
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust ||
+ stream_update->scaler_sharpener_update ||
+ stream_update->hw_cursor_req))
+ return true;
+
+ return false;
+}
+
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ return fast_updates_exist(fast_update, surface_count)
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
+}
+
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
+ enum surface_update_type update_type;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
@@ -5158,9 +5298,11 @@ static bool update_planes_and_stream_v2(struct dc *dc,
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
+ bool is_fast_update_only;
- struct surface_update_descriptor update_descriptor = {0};
-
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
@@ -5179,7 +5321,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
- &update_descriptor,
+ &update_type,
&context))
return false;
@@ -5189,7 +5331,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
dc_state_release(context);
return false;
}
- elevate_update_type(&update_descriptor, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ update_type = UPDATE_TYPE_FULL;
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
@@ -5198,13 +5340,13 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (update_descriptor.update_type <= UPDATE_TYPE_FAST) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
- update_descriptor.update_type,
+ update_type,
context);
} else {
if (!stream_update &&
@@ -5220,7 +5362,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
- update_descriptor.update_type,
+ update_type,
context);
}
if (dc->current_state != context)
@@ -5234,8 +5376,14 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
struct dc_stream_update *stream_update,
enum surface_update_type update_type)
{
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
ASSERT(update_type < UPDATE_TYPE_FULL);
- if (update_type <= UPDATE_TYPE_FAST)
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5326,7 +5474,7 @@ static bool update_planes_and_stream_v3(struct dc *dc,
struct dc_stream_update *stream_update)
{
struct dc_state *new_context;
- struct surface_update_descriptor update_descriptor = {0};
+ enum surface_update_type update_type;
/*
* When this function returns true and new_context is not equal to
@@ -5338,26 +5486,22 @@ static bool update_planes_and_stream_v3(struct dc *dc,
* replaced by a newer context. Refer to the use of
* swap_and_free_current_context below.
*/
- if (!update_planes_and_stream_state(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- &update_descriptor,
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
&new_context))
return false;
if (new_context == dc->current_state) {
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_descriptor.update_type);
+ stream_update, update_type);
if (dc->check_config.transition_countdown_to_steady_state)
dc->check_config.transition_countdown_to_steady_state--;
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
- stream_update, update_descriptor.update_type, new_context);
+ stream_update, update_type, new_context);
}
return true;
@@ -5407,6 +5551,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
+ (void)state;
bool ret = false;
dc_exit_ips_for_hw_access(dc);
@@ -5716,6 +5861,7 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
+ (void)apply;
struct dc_state *context = dc->current_state;
struct hubp *hubp;
struct pipe_ctx *pipe;
@@ -6260,8 +6406,7 @@ void dc_disable_accelerated_mode(struct dc *dc)
*/
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
- int i;
- int edp_num;
+ unsigned int i, edp_num;
struct pipe_ctx *pipe = NULL;
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
@@ -6315,8 +6460,7 @@ bool dc_abm_save_restore(
struct dc_stream_state *stream,
struct abm_save_restore *pData)
{
- int i;
- int edp_num;
+ unsigned int i, edp_num;
struct pipe_ctx *pipe = NULL;
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
@@ -6392,6 +6536,7 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
bool powerOn)
{
+ (void)dc;
if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
return;
@@ -6518,6 +6663,7 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
struct power_features *out_data)
{
+ (void)primary_otg_inst;
out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
}
@@ -7131,7 +7277,7 @@ struct dc_update_scratch_space {
struct dc_stream_update *stream_update;
bool update_v3;
bool do_clear_update_flags;
- struct surface_update_descriptor update_descriptor;
+ enum surface_update_type update_type;
struct dc_state *new_context;
enum update_v3_flow flow;
struct dc_state *backup_context;
@@ -7214,28 +7360,45 @@ static bool update_planes_and_stream_prepare_v3(
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
+ /* HWSS path determination needs to be done prior to updating the surface and stream states. */
+ struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
+
+ populate_fast_updates(fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update);
+
+ const bool is_hwss_fast_path_only =
+ fast_update_only(scratch->dc,
+ fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update,
+ scratch->stream) &&
+ !scratch->dc->check_config.enable_legacy_fast_update;
+
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- &scratch->update_descriptor,
+ &scratch->update_type,
&scratch->new_context
)) {
return false;
}
if (scratch->new_context == scratch->dc->current_state) {
- ASSERT(scratch->update_descriptor.update_type < UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
- scratch->flow = scratch->update_descriptor.update_type <= UPDATE_TYPE_FAST
+ scratch->flow = is_hwss_fast_path_only
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
}
- ASSERT(scratch->update_descriptor.update_type >= UPDATE_TYPE_FULL);
+ ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);
const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless(
scratch->dc,
@@ -7308,7 +7471,7 @@ static void update_planes_and_stream_execute_v3_commit(
intermediate_update ? scratch->intermediate_count : scratch->surface_count,
scratch->stream,
use_stream_update ? scratch->stream_update : NULL,
- intermediate_context ? UPDATE_TYPE_FULL : scratch->update_descriptor.update_type,
+ intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type,
// `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context`
intermediate_context ? scratch->intermediate_context : scratch->new_context
);
@@ -7326,7 +7489,7 @@ static void update_planes_and_stream_execute_v3(
scratch->surface_count,
scratch->stream,
scratch->stream_update,
- scratch->update_descriptor.update_type,
+ scratch->update_type,
scratch->new_context
);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 5b3695e72e19..db86e346307c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -249,6 +249,7 @@ void color_space_to_black_color(
enum dc_color_space colorspace,
struct tg_color *black_color)
{
+ (void)dc;
switch (colorspace) {
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR709:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 7bb4504889be..f4e99ca7918f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -46,7 +46,7 @@ struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
void dc_get_edp_links(const struct dc *dc,
struct dc_link **edp_links,
- int *edp_num)
+ unsigned int *edp_num)
{
int i;
@@ -68,7 +68,7 @@ bool dc_get_edp_link_panel_inst(const struct dc *dc,
unsigned int *inst_out)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num, i;
+ unsigned int edp_num, i;
*inst_out = 0;
if (link->connector_signal != SIGNAL_TYPE_EDP)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 727bcf08a84f..66597a1f5b78 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1748,6 +1748,7 @@ enum dc_status resource_build_scaling_params_for_context(
const struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1825,6 +1826,7 @@ int resource_find_free_pipe_used_as_sec_opp_head_by_cur_otg_master(
struct resource_context *new_res_ctx,
const struct pipe_ctx *cur_otg_master)
{
+ (void)cur_res_ctx;
const struct pipe_ctx *cur_sec_opp_head = cur_otg_master->next_odm_pipe;
struct pipe_ctx *new_pipe;
int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
@@ -1846,6 +1848,7 @@ int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
struct resource_context *new_res_ctx,
const struct pipe_ctx *cur_opp_head)
{
+ (void)cur_res_ctx;
const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe;
struct pipe_ctx *new_pipe;
int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
@@ -2941,6 +2944,7 @@ enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ct
const struct resource_pool *pool,
struct dc_stream_state *stream)
{
+ (void)pool;
struct dc *dc = stream->ctx->dc;
return dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
@@ -3023,6 +3027,7 @@ static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state,
struct dc_state *context)
{
+ (void)context;
struct pipe_ctx *opp_head_pipe = otg_master_pipe;
while (opp_head_pipe) {
@@ -3615,6 +3620,7 @@ static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for
const struct resource_pool *pool,
struct dc_stream_state *stream)
{
+ (void)stream;
int i;
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
@@ -3634,6 +3640,7 @@ static struct audio *find_first_free_audio(
enum engine_id id,
enum dce_version dc_version)
{
+ (void)dc_version;
int i, available_audio_count;
if (id == ENGINE_ID_UNKNOWN)
@@ -5240,7 +5247,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
return 64;
default:
ASSERT_CRITICAL(false);
- return -1;
+ return UINT_MAX;
}
}
static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index a40e5c44143f..40f7aa732258 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -205,19 +205,33 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC;
#ifdef CONFIG_DRM_AMD_DC_FP
+ bool status;
+
if (dc->debug.using_dml2) {
- if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
+ DC_FP_START();
+ status = dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+ DC_FP_END();
+
+ if (!status) {
dc_state_release(state);
return NULL;
}
- if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
- dc_state_release(state);
- return NULL;
+ if (dc->caps.dcmode_power_limits_present) {
+ bool status;
+
+ DC_FP_START();
+ status = dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source);
+ DC_FP_END();
+
+ if (!status) {
+ dc_state_release(state);
+ return NULL;
+ }
}
- }
-#endif
+ }
+#endif // CONFIG_DRM_AMD_DC_FP
kref_init(&state->refcount);
return state;
@@ -235,14 +249,20 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
#ifdef CONFIG_DRM_AMD_DC_FP
dst_state->bw_ctx.dml2 = dst_dml2;
- if (src_state->bw_ctx.dml2)
+ if (src_state->bw_ctx.dml2) {
+ DC_FP_START();
dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+ DC_FP_END();
+ }
dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source;
- if (src_state->bw_ctx.dml2_dc_power_source)
- dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source);
-#endif
+ if (src_state->bw_ctx.dml2_dc_power_source) {
+ DC_FP_START();
+ dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source);
+ DC_FP_END();
+ }
+#endif // CONFIG_DRM_AMD_DC_FP
/* context refcount should not be overridden */
dst_state->refcount = refcount;
}
@@ -258,22 +278,35 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_copy_internal(new_state, src_state);
#ifdef CONFIG_DRM_AMD_DC_FP
+ bool status;
+
new_state->bw_ctx.dml2 = NULL;
new_state->bw_ctx.dml2_dc_power_source = NULL;
- if (src_state->bw_ctx.dml2 &&
- !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
- dc_state_release(new_state);
- return NULL;
- }
+ if (src_state->bw_ctx.dml2) {
+ DC_FP_START();
+ status = dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+ DC_FP_END();
- if (src_state->bw_ctx.dml2_dc_power_source &&
- !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) {
- dc_state_release(new_state);
- return NULL;
+ if (!status) {
+ dc_state_release(new_state);
+ return NULL;
+ }
}
-#endif
+
+ if (src_state->bw_ctx.dml2_dc_power_source) {
+ DC_FP_START();
+ status = dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source,
+ src_state->bw_ctx.dml2_dc_power_source);
+ DC_FP_END();
+
+ if (!status) {
+ dc_state_release(new_state);
+ return NULL;
+ }
+ }
+#endif // CONFIG_DRM_AMD_DC_FP
kref_init(&new_state->refcount);
return new_state;
@@ -351,11 +384,13 @@ static void dc_state_free(struct kref *kref)
dc_state_destruct(state);
#ifdef CONFIG_DRM_AMD_DC_FP
+ DC_FP_START();
dml2_destroy(state->bw_ctx.dml2);
state->bw_ctx.dml2 = 0;
dml2_destroy(state->bw_ctx.dml2_dc_power_source);
state->bw_ctx.dml2_dc_power_source = 0;
+ DC_FP_END();
#endif
kvfree(state);
@@ -374,6 +409,7 @@ enum dc_status dc_state_add_stream(
struct dc_state *state,
struct dc_stream_state *stream)
{
+ (void)dc;
enum dc_status res;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -749,6 +785,7 @@ struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane)
{
+ (void)main_plane;
struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);
DC_LOGGER_INIT(dc->ctx->logger);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index daa7ab362239..e16de323f39c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -42,6 +42,13 @@
#define MAX(x, y) ((x > y) ? x : y)
#endif
+#include "dc_fpu.h"
+
+#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !DC_RUN_WITH_PREEMPTION_ENABLED
+
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -170,12 +177,14 @@ struct dc_stream_state *dc_create_stream_for_sink(
if (sink == NULL)
goto fail;
- stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC);
+ DC_RUN_WITH_PREEMPTION_ENABLED(stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC));
if (stream == NULL)
goto fail;
- stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_ATOMIC);
+ DC_RUN_WITH_PREEMPTION_ENABLED(stream->update_scratch =
+ kzalloc((int32_t) dc_update_scratch_space_size(),
+ GFP_ATOMIC));
if (stream->update_scratch == NULL)
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 5f12dcca7f71..a59b176d8e55 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -57,6 +57,7 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
void dc_plane_destruct(struct dc_plane_state *plane_state)
{
+ (void)plane_state;
// no more pointers to free within dc_plane_state
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 80e217c5a23d..55ec281db3b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
-#define DC_VER "3.2.375"
+#define DC_VER "3.2.376"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -467,7 +467,6 @@ struct dc_static_screen_params {
*/
enum surface_update_type {
- UPDATE_TYPE_ADDR_ONLY, /* only surface address is being updated, no other programming needed */
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
UPDATE_TYPE_FULL, /* may need to shuffle resources */
@@ -521,7 +520,7 @@ struct dc_config {
union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
- bool enable_4to1MPC;
+ bool allow_4to1MPC;
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
@@ -563,7 +562,6 @@ struct dc_config {
bool frame_update_cmd_version2;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
- bool no_native422_support;
};
enum visual_confirm {
@@ -988,6 +986,7 @@ struct link_service;
* causing an issue or not.
*/
struct dc_debug_options {
+ bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
int visual_confirm_rect_height;
@@ -1881,6 +1880,20 @@ struct dc_scaling_info {
struct scaling_taps scaling_quality;
};
+struct dc_fast_update {
+ const struct dc_flip_addrs *flip_addr;
+ const struct dc_gamma *gamma;
+ const struct colorspace_transform *gamut_remap_matrix;
+ const struct dc_csc_transform *input_csc_color_matrix;
+ const struct fixed31_32 *coeff_reduction_factor;
+ struct dc_transfer_func *out_transfer_func;
+ struct dc_csc_transform *output_csc_transform;
+ const struct dc_csc_transform *cursor_csc_color_matrix;
+#if defined(CONFIG_DRM_AMD_DC_DCN4_2)
+ struct cm_hist_control *cm_hist_control;
+#endif
+};
+
struct dc_surface_update {
struct dc_plane_state *surface;
@@ -2019,7 +2032,12 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
- /*
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
+void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update);
+/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
@@ -2062,7 +2080,7 @@ bool dc_get_edp_link_panel_inst(const struct dc *dc,
/* Return an array of link pointers to edp links. */
void dc_get_edp_links(const struct dc *dc,
struct dc_link **edp_links,
- int *edp_num);
+ unsigned int *edp_num);
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
bool powerOn);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index e5a222425814..79c8b4cab053 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -958,7 +958,10 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
uint32_t i;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ if (!dc_dmub_srv)
+ return;
+
+ if (!dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
@@ -1082,6 +1085,7 @@ static void dc_build_cursor_attribute_update_payload1(
struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
const struct hubp *hubp, const struct dpp *dpp)
{
+ (void)p_idx;
/* Hubp */
pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
@@ -1163,7 +1167,10 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ if (!dc_dmub_srv)
+ return;
+
+ if (!dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 101bce6b8de6..9d18f1c08079 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -52,7 +52,6 @@ struct dc_dsc_policy {
uint32_t max_target_bpp;
uint32_t min_target_bpp;
bool enable_dsc_when_not_needed;
- bool ycbcr422_simple;
};
struct dc_dsc_config_options {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 04b8b798dfff..77299767096f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -108,6 +108,7 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
va_list ap)
{
+ (void)addr;
uint32_t shift, mask, field_value;
int i = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 7f58acfe1177..a72bf413fad6 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -165,6 +165,7 @@ void dccg31_set_dpstreamclk(
int otg_inst,
int dp_hpo_inst)
{
+ (void)dp_hpo_inst;
if (src == REFCLK)
dccg31_disable_dpstreamclk(dccg, otg_inst);
else
@@ -644,6 +645,7 @@ void dccg31_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
+ (void)dccg;
/*
* Assume refclk is sourced from xtalin
* expect 24MHz
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
index e817cd7c2b6a..18b9c5ceed43 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
@@ -265,6 +265,7 @@ static void dccg32_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
+ (void)dccg;
/*
* Assume refclk is sourced from xtalin
* expect 100MHz
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 0b7908fbb115..efac64165ccd 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -558,6 +558,7 @@ static void dccg35_set_symclk32_se_src_new(
static int
dccg35_is_symclk32_se_src_functional_le_new(struct dccg *dccg, int symclk_32_se_inst, int symclk_32_le_inst)
{
+ (void)symclk_32_se_inst;
uint32_t en;
uint32_t src_sel;
@@ -2373,6 +2374,7 @@ static void dccg35_disable_symclk_se_cb(
uint32_t stream_enc_inst,
uint32_t link_enc_inst)
{
+ (void)link_enc_inst;
dccg35_disable_symclk_fe_new(dccg, stream_enc_inst);
/* DMU PHY sequence switches SYMCLK_BE (link_enc_inst) to ref clock once PHY is turned off */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 4b9a14c679d3..97605a416031 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -161,6 +161,7 @@ void dccg401_set_pixel_rate_div(
enum pixel_rate_div tmds_div,
enum pixel_rate_div unused)
{
+ (void)unused;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA;
uint32_t dp_dto_int;
@@ -353,6 +354,7 @@ void dccg401_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
+ (void)dccg;
/*
* Assume refclk is sourced from xtalin
* expect 100MHz
@@ -526,10 +528,6 @@ static void dccg401_enable_dpstreamclk(struct dccg *dccg, int otg_inst, int dp_h
BREAK_TO_DEBUGGER();
return;
}
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- DPSTREAMCLK_GATE_DISABLE, 1,
- DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
}
void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
@@ -725,6 +723,7 @@ void dccg401_init(struct dccg *dccg)
void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
+ (void)num_slices_h;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
switch (inst) {
@@ -842,6 +841,7 @@ void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint3
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
+ (void)link_enc_inst;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
switch (stream_enc_inst) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
index 19dfc3fe5c3a..b813310763e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
@@ -186,6 +186,7 @@ void dccg42_set_pixel_rate_div(
enum pixel_rate_div tmds_div,
enum pixel_rate_div unused)
{
+ (void)unused;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA;
uint32_t dp_dto_int;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 41169b42534c..469b4b8f88a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -57,6 +57,7 @@
static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
{
+ (void)panel_inst;
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 0807d20985c7..77df61bfaf27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -350,6 +350,7 @@ static uint32_t calculate_required_audio_bw_in_symbols(
uint32_t av_stream_map_lane_count,
uint32_t audio_sdp_overhead)
{
+ (void)channel_count;
/* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */
struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
@@ -1027,6 +1028,7 @@ static void get_azalia_clock_info_hdmi(
uint32_t actual_pixel_clock_100Hz,
struct azalia_clock_info *azalia_clock_info)
{
+ (void)crtc_pixel_clock_100hz;
/* audio_dto_phase= 24 * 10,000;
* 24MHz in [100Hz] units */
azalia_clock_info->audio_dto_phase =
@@ -1043,6 +1045,7 @@ static void get_azalia_clock_info_dp(
const struct audio_pll_info *pll_info,
struct azalia_clock_info *azalia_clock_info)
{
+ (void)requested_pixel_clock_100Hz;
/* Reported dpDtoSourceClockInkhz value for
* DCE8 already adjusted for SS, do not need any
* adjustment here anymore
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5722be965422..34e54fdb9d13 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -539,6 +539,7 @@ static void dce112_get_pix_clk_dividers_helper (
struct pll_settings *pll_settings,
struct pixel_clk_params *pix_clk_params)
{
+ (void)clk_src;
uint32_t actual_pixel_clock_100hz;
actual_pixel_clock_100hz = pix_clk_params->requested_pix_clk_100hz;
@@ -610,7 +611,7 @@ static uint32_t dce112_get_pix_clk_dividers(
|| pix_clk_params->requested_pix_clk_100hz == 0) {
DC_LOG_ERROR(
"%s: Invalid parameters!!\n", __func__);
- return -1;
+ return (uint32_t)-1;
}
memset(pll_settings, 0, sizeof(*pll_settings));
@@ -621,7 +622,7 @@ static uint32_t dce112_get_pix_clk_dividers(
pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10;
pll_settings->actual_pix_clk_100hz =
pix_clk_params->requested_pix_clk_100hz;
- return -1;
+ return (uint32_t)-1;
}
dce112_get_pix_clk_dividers_helper(clk_src,
@@ -847,6 +848,7 @@ static bool dce110_program_pix_clk(
enum dp_link_encoding encoding,
struct pll_settings *pll_settings)
{
+ (void)encoding;
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
@@ -921,6 +923,7 @@ static bool dce112_program_pix_clk(
enum dp_link_encoding encoding,
struct pll_settings *pll_settings)
{
+ (void)encoding;
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
@@ -1070,6 +1073,7 @@ static bool dcn401_program_pix_clk(
enum dp_link_encoding encoding,
struct pll_settings *pll_settings)
{
+ (void)encoding;
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
const struct pixel_rate_range_table_entry *e =
@@ -1376,7 +1380,7 @@ static uint32_t dcn3_get_pix_clk_dividers(
|| pix_clk_params->requested_pix_clk_100hz == 0) {
DC_LOG_ERROR(
"%s: Invalid parameters!!\n", __func__);
- return -1;
+ return UINT_MAX;
}
memset(pll_settings, 0, sizeof(*pll_settings));
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index e871b72e43ef..25ebd8a52ae4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -71,6 +71,7 @@ static const uint32_t abm_gain_stepsize = 0x0060;
static bool dce_dmcu_init(struct dmcu *dmcu)
{
+ (void)dmcu;
// Do nothing
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
index f5cd2392fc5f..f5261e8d7678 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
@@ -31,6 +31,7 @@ bool dce_i2c_oem_device_present(
size_t slave_address
)
{
+ (void)pool;
struct dc *dc = ddc->ctx->dc;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct graphics_object_id id = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 365dd2e37aea..fe239a96121e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -69,6 +69,7 @@ static enum i2c_channel_operation_result get_channel_status(
struct dce_i2c_hw *dce_i2c_hw,
uint8_t *returned_bytes)
{
+ (void)returned_bytes;
uint32_t i2c_sw_status = 0;
uint32_t value =
REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
@@ -631,6 +632,7 @@ bool dce_i2c_submit_command_hw(
struct i2c_command *cmd,
struct dce_i2c_hw *dce_i2c_hw)
{
+ (void)ddc;
uint8_t index_of_payload = 0;
bool result;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index 2d73b94c515c..52e05b9185f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -67,6 +67,7 @@ static void release_engine_dce_sw(
struct resource_pool *pool,
struct dce_i2c_sw *dce_i2c_sw)
{
+ (void)pool;
dal_ddc_close(dce_i2c_sw->ddc);
dce_i2c_sw->ddc = NULL;
}
@@ -76,6 +77,7 @@ static bool wait_for_scl_high_sw(
struct ddc *ddc,
uint16_t clock_delay_div_4)
{
+ (void)ctx;
uint32_t scl_retry = 0;
uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
@@ -469,6 +471,7 @@ bool dce_i2c_submit_command_sw(
struct i2c_command *cmd,
struct dce_i2c_sw *dce_i2c_sw)
{
+ (void)ddc;
uint8_t index_of_payload = 0;
bool result;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
index 34bff9aef66c..ee55ec21d270 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -43,6 +43,7 @@ static void dce_ipp_cursor_set_position(
const struct dc_cursor_position *position,
const struct dc_cursor_mi_param *param)
{
+ (void)param;
struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
/* lock cursor registers */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a368802ba51d..5f40ae9e3120 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -847,6 +847,7 @@ bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing)
{
+ (void)enc110;
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 1c2009e38aa1..168c2d0a5eaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -317,6 +317,7 @@ static void dce_mi_program_display_marks(
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
+ (void)stutter_enter;
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
@@ -370,6 +371,7 @@ static void dce112_mi_program_display_marks(struct mem_input *mi,
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
+ (void)stutter_entry;
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
@@ -656,6 +658,8 @@ static void dce_mi_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
+ (void)dcc;
+ (void)horizontal_mirror;
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index f342da5a5e50..61d478cfca6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -600,6 +600,7 @@ void dce110_opp_set_dyn_expansion(
enum dc_color_depth color_dpth,
enum signal_type signal)
{
+ (void)color_sp;
struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 87c19f17c799..ed407e779c12 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -271,6 +271,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
+ (void)use_vsc_sdp_for_colorimetry;
+ (void)enable_sdp_splitting;
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -901,6 +903,7 @@ static void dce110_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
+ (void)link;
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -951,6 +954,7 @@ static void dce110_stream_encoder_dp_unblank(
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
+ (void)link;
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 1ab5ae9b5ea5..c1448ae47366 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -282,6 +282,7 @@ static void calculate_inits(
const struct scaler_data *data,
struct scl_ratios_inits *inits)
{
+ (void)xfm_dce;
struct fixed31_32 h_init;
struct fixed31_32 v_init;
@@ -1240,6 +1241,7 @@ static void program_color_matrix(
const struct out_csc_color_matrix *tbl_entry,
enum grph_color_adjust_option options)
{
+ (void)options;
{
REG_SET_2(OUTPUT_CSC_C11_C12, 0,
OUTPUT_CSC_C11, tbl_entry->regval[0],
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index b686d89b79b2..93550c5e4d02 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -41,8 +41,7 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
{
struct dc_context *dc = abm->ctx;
struct dc_link *edp_links[MAX_NUM_EDP];
- int i;
- int edp_num;
+ unsigned int i, edp_num;
unsigned int ret = ABM_FEATURE_NO_SUPPORT;
dc_get_edp_links(dc->dc, edp_links, &edp_num);
@@ -174,6 +173,7 @@ static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm,
unsigned int controller_id,
unsigned int panel_inst)
{
+ (void)controller_id;
bool ret = false;
unsigned int feature_support;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index a641ae04450c..806b5709c9e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -188,6 +188,7 @@ void dmub_abm_init_config(struct abm *abm,
bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
{
+ (void)stream_inst;
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
uint8_t panel_mask = 0x01 << panel_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d0ffa99f1fe0..52673e2f504c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -82,7 +82,7 @@ bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct d
if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ unsigned int edp_num;
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 1)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 28a218149b8b..0af1b8e0a49e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -216,6 +216,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
uint8_t panel_inst,
uint16_t frame_skip_number)
{
+ (void)panel_inst;
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
struct dmub_rb_cmd_replay_set_coasting_vtotal *pCmd = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 2c43c2422638..b265a72eeb70 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -40,6 +40,7 @@ static void set_flip_control(
struct dce_mem_input *mem_input110,
bool immediate)
{
+ (void)immediate;
uint32_t value = 0;
value = dm_read_reg(
@@ -165,6 +166,7 @@ static void program_tiling(
const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ (void)pixel_format;
uint32_t value = 0;
set_reg_field_value(value, info->gfx8.num_banks,
@@ -642,6 +644,8 @@ static void dce_mem_input_v_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizotal_mirror)
{
+ (void)dcc;
+ (void)horizotal_mirror;
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
enable(mem_input110);
@@ -927,6 +931,7 @@ static void dce_mem_input_v_program_display_marks(
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
+ (void)stutter_enter;
program_urgency_watermark_l(
mem_input->ctx,
urgent,
@@ -970,6 +975,9 @@ static void dce110_allocate_mem_input_v(
uint32_t pix_clk_khz,/* for current stream */
uint32_t total_stream_num)
{
+ (void)h_total;
+ (void)v_total;
+ (void)total_stream_num;
uint32_t addr;
uint32_t value;
uint32_t pix_dur;
@@ -1009,6 +1017,8 @@ static void dce110_free_mem_input_v(
struct mem_input *mi,
uint32_t total_stream_num)
{
+ (void)mi;
+ (void)total_stream_num;
}
static const struct mem_input_funcs dce110_mem_input_v_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index e096d2b95ef9..cf63fac82832 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -110,6 +110,7 @@ static void program_color_matrix_v(
const struct out_csc_color_matrix *tbl_entry,
enum grph_color_adjust_option options)
{
+ (void)options;
struct dc_context *ctx = xfm_dce->base.ctx;
uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
bool use_set_a = (get_reg_field_value(cntl_value,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
index 9b65b77e8823..a4e76db46c9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -551,5 +551,7 @@ void dce110_opp_set_regamma_mode_v(
struct transform *xfm,
enum opp_regamma mode)
{
+ (void)xfm;
+ (void)mode;
// TODO: need to implement the function
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 61b0807693fb..b015b27cd1c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -66,6 +66,7 @@ static void dce110_timing_generator_apply_front_porch_workaround(
struct timing_generator *tg,
struct dc_crtc_timing *timing)
{
+ (void)tg;
if (timing->flags.INTERLACE == 1) {
if (timing->v_front_porch < 2)
timing->v_front_porch = 2;
@@ -1115,6 +1116,7 @@ bool dce110_timing_generator_validate_timing(
const struct dc_crtc_timing *timing,
enum signal_type signal)
{
+ (void)signal;
uint32_t h_blank;
uint32_t h_back_porch, hsync_offset, h_sync_start;
@@ -1490,6 +1492,7 @@ void dce110_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
int source_tg_inst)
{
+ (void)source_tg_inst;
uint32_t value;
uint32_t rising_edge = 0;
uint32_t falling_edge = 0;
@@ -1959,6 +1962,12 @@ void dce110_tg_program_timing(struct timing_generator *tg,
const enum signal_type signal,
bool use_vbios)
{
+ (void)vready_offset;
+ (void)vstartup_start;
+ (void)vupdate_offset;
+ (void)vupdate_width;
+ (void)pstate_keepout;
+ (void)signal;
if (use_vbios)
dce110_timing_generator_program_timing_generator(tg, timing);
else
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 9837dec837ff..ba22c93acd81 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -442,6 +442,12 @@ static void dce110_timing_generator_v_program_timing(struct timing_generator *tg
const enum signal_type signal,
bool use_vbios)
{
+ (void)vready_offset;
+ (void)vstartup_start;
+ (void)vupdate_offset;
+ (void)vupdate_width;
+ (void)pstate_keepout;
+ (void)signal;
if (use_vbios)
dce110_timing_generator_program_timing_generator(tg, timing);
else
@@ -621,6 +627,7 @@ static void dce110_timing_generator_v_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
+ (void)gsl_params;
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
@@ -629,6 +636,7 @@ static void dce110_timing_generator_v_enable_reset_trigger(
struct timing_generator *tg,
int source_tg_inst)
{
+ (void)source_tg_inst;
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
@@ -650,6 +658,7 @@ static void dce110_timing_generator_v_tear_down_global_swap_lock(
static void dce110_timing_generator_v_disable_vga(
struct timing_generator *tg)
{
+ (void)tg;
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index 28d3b2663cd3..6be18665b1f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -371,6 +371,9 @@ static void calculate_inits(
struct rect *luma_viewport,
struct rect *chroma_viewport)
{
+ (void)xfm_dce;
+ (void)luma_viewport;
+ (void)chroma_viewport;
inits->h_int_scale_ratio_luma =
dc_fixpt_u2d19(data->ratios.horz) << 5;
inits->v_int_scale_ratio_luma =
@@ -619,6 +622,8 @@ static void dce110_xfmv_set_gamut_remap(
struct transform *xfm,
const struct xfm_grph_csc_adjustment *adjust)
{
+ (void)xfm;
+ (void)adjust;
/* DO NOTHING*/
}
@@ -627,6 +632,7 @@ static void dce110_xfmv_set_pixel_storage_depth(
enum lb_pixel_depth depth,
const struct bit_depth_reduction_params *bit_depth_params)
{
+ (void)bit_depth_params;
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
int pixel_depth = 0;
int expan_mode = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
index 187f45a7f5e1..fe97d3946cab 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -284,6 +284,7 @@ static uint32_t align_to_chunks_number_per_line(
struct dce112_compressor *cp110,
uint32_t pixels)
{
+ (void)cp110;
return 256 * ((pixels + 255) / 256);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 31c4f44ceaac..70410ef0c291 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -304,6 +304,7 @@ static void dce120_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
int source)
{
+ (void)source;
enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t rising_edge = 0;
@@ -701,6 +702,12 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
const enum signal_type signal,
bool use_vbios)
{
+ (void)vready_offset;
+ (void)vstartup_start;
+ (void)vupdate_offset;
+ (void)vupdate_width;
+ (void)pstate_keepout;
+ (void)signal;
if (use_vbios)
dce110_timing_generator_program_timing_generator(tg, timing);
else
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 88e7a1fc9a30..53c03364f5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -115,6 +115,12 @@ static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const enum signal_type signal,
bool use_vbios)
{
+ (void)vready_offset;
+ (void)vstartup_start;
+ (void)vupdate_offset;
+ (void)vupdate_width;
+ (void)pstate_keepout;
+ (void)signal;
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index dcd2cdfe91eb..c702a30563f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -381,10 +381,10 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
+ seg_distr[i] = (uint32_t)-1;
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
+ if (seg_distr[k] != (uint32_t)-1)
hw_points += (1 << seg_distr[k]);
}
@@ -565,7 +565,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
+ seg_distr[i] = (uint32_t)-1;
/* 12 segments
* segments are from 2^-12 to 0
*/
@@ -573,7 +573,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
seg_distr[i] = 4;
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
+ if (seg_distr[k] != (uint32_t)-1)
hw_points += (1 << seg_distr[k]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index 365a3215f6d5..e9efbb49586e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -44,6 +44,7 @@
static bool dwb1_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
{
+ (void)dwbc;
if (caps) {
caps->adapter_id = 0; /* we only support 1 adapter currently */
caps->hw_version = DCN_VERSION_1_0;
@@ -63,6 +64,7 @@ static bool dwb1_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
static bool dwb1_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
{
+ (void)params;
struct dcn10_dwbc *dwbc10 = TO_DCN10_DWBC(dwbc);
/* disable first. */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index a0d437f0ce2b..f73c5f42ea68 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -746,7 +746,7 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
src_width, dest_width);
if (dc_fixpt_floor(tmp_h_ratio_luma) == 8)
- h_ratio_luma = -1;
+ h_ratio_luma = (uint32_t)-1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
@@ -824,7 +824,7 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
src_height, dest_height);
if (dc_fixpt_floor(tmp_v_ratio_luma) == 8)
- v_ratio_luma = -1;
+ v_ratio_luma = (uint32_t)-1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index 227aa8672d17..9dbccf58dde5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -159,10 +159,10 @@ bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx,
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
+ seg_distr[i] = (uint32_t)-1;
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
+ if (seg_distr[k] != (uint32_t)-1)
hw_points += (1 << seg_distr[k]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
index 05aac3e444b4..4c7e4fe3c680 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -77,6 +77,7 @@ static void apg31_se_audio_setup(
unsigned int az_inst,
struct audio_info *audio_info)
{
+ (void)az_inst;
struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
ASSERT(audio_info);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index d928b4dcf6b8..d913f065ecca 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -249,6 +249,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
+ (void)enable_sdp_splitting;
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -783,6 +784,7 @@ void enc1_stream_encoder_send_immediate_sdp_message(
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
+ (void)sdp_message_size;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t value = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 3e85e9c3d2cb..d1fd5462dca5 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -394,6 +394,8 @@ void enc314_dp_set_dsc_config(struct stream_encoder *enc,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width)
{
+ (void)dsc_bytes_per_pixel;
+ (void)dsc_slice_width;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 3523d1cdc1a3..edafa3808455 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -356,6 +356,8 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width)
{
+ (void)dsc_bytes_per_pixel;
+ (void)dsc_slice_width;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index fd5d1dbf9dc6..de24dcd27e6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -369,6 +369,7 @@ static void enc35_stream_encoder_map_to_link(
uint32_t stream_enc_inst,
uint32_t link_enc_inst)
{
+ (void)stream_enc_inst;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
ASSERT(stream_enc_inst < 5 && link_enc_inst < 5);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 99aab70ef3e1..2d33ed0c062d 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -57,6 +57,8 @@ static void enc401_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
{
+ (void)enc;
+ (void)odm_combine;
}
/* setup stream encoder in dvi mode */
@@ -710,6 +712,7 @@ void enc401_stream_encoder_map_to_link(
uint32_t stream_enc_inst,
uint32_t link_enc_inst)
{
+ (void)stream_enc_inst;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(STREAM_MAPPER_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c
index 2655bc194a35..5b2bba0eff0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c
@@ -30,52 +30,92 @@
static bool virtual_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
- const struct dc_stream_state *stream) { return true; }
+ const struct dc_stream_state *stream) {
+ (void)enc;
+ (void)stream;
+ return true;
+ }
-static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}
+static void virtual_link_encoder_hw_init(struct link_encoder *enc)
+{
+ (void)enc;
+}
static void virtual_link_encoder_setup(
- struct link_encoder *enc,
- enum signal_type signal) {}
+ struct link_encoder *enc, enum signal_type signal) {
+ (void)enc;
+ (void)signal;
+ }
static void virtual_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
- uint32_t pixel_clock) {}
+ uint32_t pixel_clock) {
+ (void)enc;
+ (void)clock_source;
+ (void)color_depth;
+ (void)signal;
+ (void)pixel_clock;
+ }
static void virtual_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum clock_source_id clock_source) {}
+ enum clock_source_id clock_source) {
+ (void)enc;
+ (void)link_settings;
+ (void)clock_source;
+ }
static void virtual_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum clock_source_id clock_source) {}
+ enum clock_source_id clock_source) {
+ (void)enc;
+ (void)link_settings;
+ (void)clock_source;
+ }
static void virtual_link_encoder_disable_output(
struct link_encoder *link_enc,
- enum signal_type signal) {}
+ enum signal_type signal) {
+ (void)link_enc;
+ (void)signal;
+ }
static void virtual_link_encoder_dp_set_lane_settings(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
- const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) {}
+ const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) {
+ (void)enc;
+ (void)link_settings;
+ (void)lane_settings;
+ }
static void virtual_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
- const struct encoder_set_dp_phy_pattern_param *param) {}
+ const struct encoder_set_dp_phy_pattern_param *param) {
+ (void)enc;
+ (void)param;
+ }
static void virtual_link_encoder_update_mst_stream_allocation_table(
struct link_encoder *enc,
- const struct link_mst_stream_allocation_table *table) {}
+ const struct link_mst_stream_allocation_table *table) {
+ (void)enc;
+ (void)table;
+ }
static void virtual_link_encoder_connect_dig_be_to_fe(
struct link_encoder *enc,
enum engine_id engine,
- bool connect) {}
+ bool connect) {
+ (void)enc;
+ (void)engine;
+ (void)connect;
+ }
static void virtual_link_encoder_destroy(struct link_encoder **enc)
{
@@ -86,6 +126,7 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
+ (void)enc;
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
diff --git a/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
index a9c8857476ac..27448f2b2467 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
@@ -31,80 +31,127 @@ static void virtual_stream_encoder_dp_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
bool use_vsc_sdp_for_colorimetry,
- uint32_t enable_sdp_splitting) {}
+ uint32_t enable_sdp_splitting) {
+ (void)enc;
+ (void)crtc_timing;
+ (void)output_color_space;
+ (void)use_vsc_sdp_for_colorimetry;
+ (void)enable_sdp_splitting;
+ }
static void virtual_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
- bool enable_audio) {}
+ bool enable_audio) {
+ (void)enc;
+ (void)crtc_timing;
+ (void)actual_pix_clk_khz;
+ (void)enable_audio;
+ }
static void virtual_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- bool is_dual_link) {}
+ bool is_dual_link) {
+ (void)enc;
+ (void)crtc_timing;
+ (void)is_dual_link;
+ }
static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
- struct fixed31_32 avg_time_slots_per_mtp)
-{}
+ struct fixed31_32 avg_time_slots_per_mtp) {
+ (void)enc;
+ (void)avg_time_slots_per_mtp;
+ }
static void virtual_stream_encoder_update_hdmi_info_packets(
struct stream_encoder *enc,
- const struct encoder_info_frame *info_frame) {}
+ const struct encoder_info_frame *info_frame) {
+ (void)enc;
+ (void)info_frame;
+ }
static void virtual_stream_encoder_stop_hdmi_info_packets(
- struct stream_encoder *enc) {}
+ struct stream_encoder *enc) {
+ (void)enc;
+ }
static void virtual_stream_encoder_set_avmute(
- struct stream_encoder *enc,
- bool enable) {}
+ struct stream_encoder *enc, bool enable) {
+ (void)enc;
+ (void)enable;
+ }
static void virtual_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
- const struct encoder_info_frame *info_frame) {}
+ const struct encoder_info_frame *info_frame) {
+ (void)enc;
+ (void)info_frame;
+ }
static void virtual_stream_encoder_stop_dp_info_packets(
- struct stream_encoder *enc) {}
+ struct stream_encoder *enc) {
+ (void)enc;
+ }
static void virtual_stream_encoder_dp_blank(
struct dc_link *link,
- struct stream_encoder *enc) {}
+ struct stream_encoder *enc) {
+ (void)link;
+ (void)enc;
+ }
static void virtual_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
- const struct encoder_unblank_param *param) {}
+ const struct encoder_unblank_param *param) {
+ (void)enc;
+ (void)link;
+ (void)param;
+ }
static void virtual_audio_mute_control(
- struct stream_encoder *enc,
- bool mute) {}
+ struct stream_encoder *enc, bool mute) {
+ (void)enc;
+ (void)mute;
+ }
static void virtual_stream_encoder_reset_hdmi_stream_attribute(
- struct stream_encoder *enc)
-{}
+ struct stream_encoder *enc)
+{
+ (void)enc;
+}
static void virtual_enc_dp_set_odm_combine(
- struct stream_encoder *enc,
- bool odm_combine)
-{}
+ struct stream_encoder *enc, bool odm_combine) {
+ (void)enc;
+ (void)odm_combine;
+ }
static void virtual_dig_connect_to_otg(
- struct stream_encoder *enc,
- int tg_inst)
-{}
+ struct stream_encoder *enc, int tg_inst) {
+ (void)enc;
+ (void)tg_inst;
+ }
static void virtual_setup_stereo_sync(
- struct stream_encoder *enc,
- int tg_inst,
- bool enable)
-{}
+ struct stream_encoder *enc,
+ int tg_inst, bool enable) {
+ (void)enc;
+ (void)tg_inst;
+ (void)enable;
+ }
static void virtual_stream_encoder_set_dsc_pps_info_packet(
- struct stream_encoder *enc,
- bool enable,
- uint8_t *dsc_packed_pps,
- bool immediate_update)
-{}
+ struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ (void)enc;
+ (void)enable;
+ (void)dsc_packed_pps;
+ (void)immediate_update;
+}
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
.dp_set_odm_combine =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 74962791302f..61553e24d53e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -525,6 +525,7 @@ static void split_stream_across_pipes(
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe)
{
+ (void)res_ctx;
int pipe_idx = secondary_pipe->pipe_idx;
if (!primary_pipe->plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
index c5e84190c17a..5679b79d6f53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
@@ -76,7 +76,7 @@ struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.line_buffer_size_bits = 589824,
.max_line_buffer_lines = 12,
.IsLineBufferBppFixed = 0,
- .LineBufferFixedBpp = -1,
+ .LineBufferFixedBpp = (unsigned int)-1,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.max_num_dpp = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 7aaf13bbd4e4..887744d56d6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1316,6 +1316,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
display_e2e_pipe_params_st *pipes,
enum dc_validate_mode validate_mode)
{
+ (void)validate_mode;
int pipe_cnt, i;
bool synchronized_vblank = true;
struct resource_context *res_ctx = &context->res_ctx;
@@ -2335,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode, false);
if (pipe_cnt == 0)
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 0c8c4a080c50..f5f636afe33c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -501,6 +501,8 @@ static bool CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)mode_lib;
+
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
@@ -878,6 +880,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *dpte_row_height,
unsigned int *meta_row_height)
{
+ (void)ViewportWidth;
unsigned int MetaRequestHeight;
unsigned int MetaRequestWidth;
unsigned int MetaSurfWidth;
@@ -2953,6 +2956,7 @@ static double CalculateRemoteSurfaceFlipDelay(
double *TInitXFill,
double *TslvChk)
{
+ (void)mode_lib;
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index c935903b68e1..95b0a3501880 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -495,6 +495,7 @@ static bool CalculateDelayAfterScaler(
double *DSTYAfterScaler
)
{
+ (void)ReturnBW;
unsigned int DPPCycles, DISPCLKCycles;
double DataFabricLineDeliveryTimeLuma;
double DataFabricLineDeliveryTimeChroma;
@@ -592,6 +593,8 @@ static bool CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)mode_lib;
+
bool MyError = false;
double TotalRepeaterDelayTime;
double Tdm, LineTime, Tsetup;
@@ -938,6 +941,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *dpte_row_height,
unsigned int *meta_row_height)
{
+ (void)ViewportWidth;
unsigned int MetaRequestHeight;
unsigned int MetaRequestWidth;
unsigned int MetaSurfWidth;
@@ -3026,6 +3030,7 @@ static double CalculateRemoteSurfaceFlipDelay(
double *TInitXFill,
double *TslvChk)
{
+ (void)mode_lib;
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 9c58ff1069d6..591d9618bdc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -126,6 +126,7 @@ static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -1538,6 +1539,9 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
@@ -1588,6 +1592,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 570e6e39eb45..d4dddc9d535a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -126,6 +126,7 @@ static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -1539,6 +1540,9 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
@@ -1589,6 +1593,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 48905ca39b70..11570a0c9427 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -695,6 +695,9 @@ static bool CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)mode_lib;
+ (void)XFCEnabled;
+
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
@@ -1290,6 +1293,8 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
+ (void)SourcePixelFormat;
+ (void)ViewportWidth;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
@@ -3040,6 +3045,7 @@ static double CalculateRemoteSurfaceFlipDelay(
double *TInitXFill,
double *TslvChk)
{
+ (void)mode_lib;
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
@@ -3187,6 +3193,7 @@ static void CalculateFlipSchedule(
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
+ (void)mode_lib;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevels;
double TimeForFetchingMetaPTEImmediateFlip;
@@ -5294,6 +5301,15 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
{
+ (void)DPPCLK;
+ (void)SwathWidthSingleDPPY;
+ (void)DCFCLK;
+ (void)UrgentOutOfOrderReturn;
+ (void)ReturnBW;
+ (void)GPUVMEnable;
+ (void)dpte_group_bytes;
+ (void)MetaChunkSize;
+
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double DPPOutputBufferLinesY;
@@ -5885,6 +5901,9 @@ static void CalculateMetaAndPTETimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
+ (void)VRatioPrefetchY;
+ (void)VRatioPrefetchC;
+
unsigned int meta_chunk_width;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_per_row_int;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index f549da082c01..8a611b3bec33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -102,6 +102,7 @@ static double get_refcyc_per_delivery(
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -1647,6 +1648,9 @@ void dml21_rq_dlg_get_dlg_reg(
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
@@ -1702,6 +1706,7 @@ static void calculate_ttu_cursor(
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index e5f5c0663750..0cdd60869ce1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -571,6 +571,7 @@ void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
unsigned int *dcfclk_mhz,
unsigned int *dram_speed_mts)
{
+ (void)bw_params;
unsigned int i;
dc_assert_fp_enabled();
@@ -720,6 +721,7 @@ void dcn3_fpu_build_wm_range_table(struct clk_mgr *base)
void patch_dcn30_soc_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *dcn3_0_ip)
{
+ (void)dcn3_0_ip;
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 1df3412be346..634982173190 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -778,6 +778,8 @@ static bool CalculatePrefetchSchedule(
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata)
{
+ (void)SwathWidthY;
+ (void)SwathWidthC;
struct vba_vars_st *v = &mode_lib->vba;
double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
@@ -1233,6 +1235,10 @@ static void CalculateDCCConfiguration(
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
+ (void)SurfaceWidthChroma;
+ (void)SurfaceHeightChroma;
+ (void)BytePerPixelDETY;
+ (void)BytePerPixelDETC;
int yuv420 = 0;
int horz_div_l = 0;
int horz_div_c = 0;
@@ -1595,6 +1601,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
+ (void)SourcePixelFormat;
unsigned int MPDEBytesFrame = 0;
unsigned int DCCMetaSurfaceBytes = 0;
unsigned int MacroTileSizeBytes = 0;
@@ -3068,6 +3075,8 @@ double dml30_CalculateWriteBackDISPCLK(
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackVRatio;
double DISPCLK_H = 0, DISPCLK_V = 0, DISPCLK_HB = 0;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
@@ -3086,6 +3095,8 @@ static double CalculateWriteBackDelay(
long WritebackSourceHeight,
unsigned int HTotal)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackHRatio;
double CalculateWriteBackDelay = 0;
double Line_length = 0;
double Output_lines_last_notclamped = 0;
@@ -3199,6 +3210,8 @@ static void CalculateFlipSchedule(
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
+ (void)mode_lib;
+ (void)HostVMMinPageSize;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips = 0;
double TimeForFetchingMetaPTEImmediateFlip = 0;
@@ -4968,6 +4981,10 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double BytePerPixelDETC[],
enum clock_change_support *DRAMClockChangeSupport)
{
+ (void)DCFCLK;
+ (void)ReturnBW;
+ (void)DPPCLK;
+ (void)DETBufferSizeC;
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
@@ -5212,6 +5229,8 @@ static void CalculateUrgentBurstFactor(
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
+ (void)DETBufferSizeInKByte;
+ (void)VRatioC;
double LinesInDETLuma = 0;
double LinesInDETChroma = 0;
unsigned int LinesInCursorBuffer = 0;
@@ -5575,6 +5594,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
+ (void)dpte_row_width_luma_ub;
+ (void)dpte_row_width_chroma_ub;
int num_group_per_lower_vm_stage = 0;
int num_req_per_lower_vm_stage = 0;
unsigned int k;
@@ -5857,6 +5878,7 @@ static void CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
+ (void)HRatioChroma;
int MaximumSwathHeightY[DC__NUM_DPP__MAX] = { 0 };
int MaximumSwathHeightC[DC__NUM_DPP__MAX] = { 0 };
int MinimumSwathHeightY = 0;
@@ -6039,6 +6061,7 @@ static void CalculateSwathWidth(
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[])
{
+ (void)BytePerPixY;
unsigned int k, j;
long surface_width_ub_l;
long surface_height_ub_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 4fb37df54d59..472ac5ee165f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -50,6 +50,7 @@ static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -804,6 +805,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
@@ -896,6 +898,9 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ed59c77bc6f6..9833467722b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -873,6 +873,11 @@ static bool CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)mode_lib;
+ (void)HostVMMinPageSize;
+ (void)SwathWidthY;
+ (void)SwathWidthC;
+
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
@@ -1491,6 +1496,10 @@ static void CalculateDCCConfiguration(
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
+ (void)SurfaceWidthChroma;
+ (void)SurfaceHeightChroma;
+ (void)BytePerPixelDETY;
+ (void)BytePerPixelDETC;
int yuv420;
int horz_div_l;
int horz_div_c;
@@ -1823,6 +1832,7 @@ static unsigned int CalculateVMAndRowBytes(
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame)
{
+ (void)SourcePixelFormat;
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
@@ -3365,6 +3375,8 @@ double dml31_CalculateWriteBackDISPCLK(
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackVRatio;
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
@@ -3383,6 +3395,8 @@ static double CalculateWriteBackDelay(
int WritebackSourceHeight,
unsigned int HTotal)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackHRatio;
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
@@ -5566,6 +5580,9 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark)
{
+ (void)DCFCLK;
+ (void)ReturnBW;
+ (void)DETBufferSizeC;
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
@@ -5831,6 +5848,7 @@ static void CalculateUrgentBurstFactor(
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
+ (void)VRatioC;
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
@@ -6213,6 +6231,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
+ (void)dpte_row_width_luma_ub;
+ (void)dpte_row_width_chroma_ub;
int num_group_per_lower_vm_stage;
int num_req_per_lower_vm_stage;
int k;
@@ -6350,6 +6370,8 @@ static void CalculateStutterEfficiency(
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod)
{
+ (void)ConfigReturnBufferSizeInKByte;
+
struct vba_vars_st *v = &mode_lib->vba;
double DETBufferingTimeY;
@@ -6649,6 +6671,7 @@ static void CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
+ (void)HRatioChroma;
int MaximumSwathHeightY[DC__NUM_DPP__MAX];
int MaximumSwathHeightC[DC__NUM_DPP__MAX];
int MinimumSwathHeightY;
@@ -6823,6 +6846,7 @@ static void CalculateSwathWidth(
int swath_width_luma_ub[],
int swath_width_chroma_ub[])
{
+ (void)BytePerPixY;
enum odm_combine_mode MainPlaneODMCombine;
int j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index bfeb01477f0c..dfa1bc31eb0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -51,6 +51,7 @@ static double get_refcyc_per_delivery(
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -785,6 +786,7 @@ static void calculate_ttu_cursor(
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
@@ -859,6 +861,12 @@ static void dml_rq_dlg_get_dlg_params(
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)cstate_en;
+ (void)pstate_en;
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
+ (void)dlg_sys_param;
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index df9d50b9b57c..ab016c294ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -391,13 +391,9 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
}
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
- dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state
&& pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 9f3938a50240..033fde774337 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -891,6 +891,11 @@ static bool CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)mode_lib;
+ (void)HostVMMinPageSize;
+ (void)SwathWidthY;
+ (void)SwathWidthC;
+
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
@@ -1508,6 +1513,10 @@ static void CalculateDCCConfiguration(
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
+ (void)SurfaceWidthChroma;
+ (void)SurfaceHeightChroma;
+ (void)BytePerPixelDETY;
+ (void)BytePerPixelDETC;
int yuv420;
int horz_div_l;
int horz_div_c;
@@ -1840,6 +1849,7 @@ static unsigned int CalculateVMAndRowBytes(
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame)
{
+ (void)SourcePixelFormat;
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
@@ -3471,6 +3481,8 @@ double dml314_CalculateWriteBackDISPCLK(
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackVRatio;
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
@@ -3489,6 +3501,8 @@ static double CalculateWriteBackDelay(
int WritebackSourceHeight,
unsigned int HTotal)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackHRatio;
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
@@ -5660,6 +5674,9 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark)
{
+ (void)DCFCLK;
+ (void)ReturnBW;
+ (void)DETBufferSizeC;
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
@@ -5925,6 +5942,7 @@ static void CalculateUrgentBurstFactor(
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
+ (void)VRatioC;
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
@@ -6308,6 +6326,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
+ (void)dpte_row_width_luma_ub;
+ (void)dpte_row_width_chroma_ub;
int num_group_per_lower_vm_stage;
int num_req_per_lower_vm_stage;
int k;
@@ -6445,6 +6465,8 @@ static void CalculateStutterEfficiency(
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod)
{
+ (void)ConfigReturnBufferSizeInKByte;
+
struct vba_vars_st *v = &mode_lib->vba;
double DETBufferingTimeY;
@@ -6743,6 +6765,7 @@ static void CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
+ (void)HRatioChroma;
int MaximumSwathHeightY[DC__NUM_DPP__MAX];
int MaximumSwathHeightC[DC__NUM_DPP__MAX];
int MinimumSwathHeightY;
@@ -6914,6 +6937,7 @@ static void CalculateSwathWidth(
int swath_width_luma_ub[],
int swath_width_chroma_ub[])
{
+ (void)BytePerPixY;
enum odm_combine_mode MainPlaneODMCombine;
int j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 04df263ff65e..40a916c2a9c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -139,6 +139,7 @@ static double get_refcyc_per_delivery(
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -872,6 +873,7 @@ static void calculate_ttu_cursor(
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
+ (void)mode_lib;
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
@@ -944,6 +946,12 @@ static void dml_rq_dlg_get_dlg_params(
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
+ (void)cstate_en;
+ (void)pstate_en;
+ (void)vm_en;
+ (void)ignore_viewport_pos;
+ (void)immediate_flip_support;
+ (void)dlg_sys_param;
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 8a0f128722b0..e29497204df7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -3488,6 +3488,7 @@ bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context
*/
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
{
+ (void)dc;
double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
int i;
@@ -3593,6 +3594,7 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, stru
void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
{
+ (void)soc_bb;
dc_assert_fp_enabled();
dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 19b142412a84..5e72966a8daf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -457,6 +457,7 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ (void)HRatioChroma;
unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX] = { 0 };
@@ -716,6 +717,7 @@ void dml32_CalculateSwathWidth(
unsigned int swath_width_luma_ub[], // per-pipe
unsigned int swath_width_chroma_ub[]) // per-pipe
{
+ (void)BytePerPixY;
unsigned int k, j;
enum odm_combine_mode MainSurfaceODMMode;
@@ -2304,6 +2306,7 @@ unsigned int dml32_CalculateVMAndRowBytes(
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
+ (void)SourcePixelFormat;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int ExtraDPDEBytesFrame;
@@ -2745,6 +2748,7 @@ void dml32_CalculateUrgentBurstFactor(
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
+ (void)VRatioC;
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
@@ -2900,6 +2904,8 @@ double dml32_CalculateWriteBackDelay(
unsigned int WritebackSourceHeight,
unsigned int HTotal)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackHRatio;
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
@@ -2977,6 +2983,9 @@ void dml32_UseMinimumDCFCLK(
/* Output */
double DCFCLKState[][2])
{
+ (void)MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation;
+ (void)ReadBandwidthLuma;
+ (void)ReadBandwidthChroma;
unsigned int i, j, k;
unsigned int dummy1;
double dummy2, dummy3;
@@ -3447,6 +3456,8 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ (void)SwathWidthY;
+ (void)SwathWidthC;
double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
@@ -4145,6 +4156,7 @@ void dml32_CalculateFlipSchedule(
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
+ (void)HostVMMinPageSize;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
@@ -4287,6 +4299,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
bool *USRRetrainingSupport,
double ActiveDRAMClockChangeLatencyMargin[])
{
+ (void)DCFCLK;
+ (void)ReturnBW;
unsigned int i, j, k;
unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
unsigned int DRAMClockChangeSupportNumber = 0;
@@ -4655,6 +4669,8 @@ double dml32_CalculateWriteBackDISPCLK(
unsigned int WritebackLineBufferSize,
double DISPCLKDPPCLKVCOSpeed)
{
+ (void)WritebackPixelFormat;
+ (void)WritebackVRatio;
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
@@ -5166,6 +5182,8 @@ void dml32_CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
+ (void)dpte_row_width_luma_ub;
+ (void)dpte_row_width_chroma_ub;
unsigned int k;
unsigned int num_group_per_lower_vm_stage;
unsigned int num_req_per_lower_vm_stage;
@@ -5321,6 +5339,11 @@ void dml32_CalculateDCCConfiguration(
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
+ (void)SurfaceWidthChroma;
+ (void)SurfaceHeightChroma;
+ (void)TilingFormat;
+ (void)BytePerPixelDETY;
+ (void)BytePerPixelDETC;
typedef enum {
REQ_256Bytes,
REQ_128BytesNonContiguous,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 8a177d5ae213..7f40048dd67d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -202,6 +202,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
{
+ (void)clk_mgr;
//TODO
}
@@ -528,14 +529,9 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
}
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
- dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 &&
- pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) &&
+ if (!is_dual_plane(pipe->plane_state->format) &&
pipe->plane_state->src_rect.width <= 5120) {
/*
* Limit to 5k max to avoid forced pipe split when there
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 77023b619f1e..73c2aee57f28 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -561,14 +561,9 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
}
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
- dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 &&
- pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) &&
+ if (!is_dual_plane(pipe->plane_state->format) &&
pipe->plane_state->src_rect.width <= 5120) {
/*
* Limit to 5k max to avoid forced pipe split when there
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index da0cfbb071e6..684779ee54a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -162,6 +162,7 @@ void dml_log_pipe_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
+ (void)mode_lib;
display_pipe_source_params_st *pipe_src;
display_pipe_dest_params_st *pipe_dest;
scaler_ratio_depth_st *scale_ratio_depth;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 12ff65b6a7e5..3f27293a41cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -49,6 +49,8 @@ void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dp
void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
+ (void)mode_lib;
+ (void)rq_sizing;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes);
@@ -64,6 +66,8 @@ void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const st
void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param)
{
+ (void)mode_lib;
+ (void)rq_dlg_param;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
dml_print(
@@ -107,6 +111,8 @@ void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struc
void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param)
{
+ (void)mode_lib;
+ (void)rq_misc_param;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
dml_print(
@@ -124,6 +130,8 @@ void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const stru
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
+ (void)dlg_sys_param;
+ (void)mode_lib;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us);
@@ -144,6 +152,8 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v
void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs)
{
+ (void)mode_lib;
+ (void)rq_regs;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size);
@@ -179,6 +189,8 @@ void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_
void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs)
{
+ (void)dlg_regs;
+ (void)mode_lib;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
dml_print(
@@ -316,6 +328,8 @@ void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi
void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
{
+ (void)mode_lib;
+ (void)ttu_regs;
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
dml_print(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 88dc2b97e7bf..cf194bcba455 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -104,6 +104,7 @@ static double get_refcyc_per_delivery(
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
+ (void)mode_lib;
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
@@ -133,6 +134,7 @@ static double get_vratio_pre(
double vinit,
double l_sw)
{
+ (void)mode_lib;
double prefill = dml_floor(vinit, 1);
double vratio_pre = 1.0;
@@ -174,6 +176,7 @@ static void get_swath_need(
unsigned int swath_height,
double vinit)
{
+ (void)mode_lib;
double prefill = dml_floor(vinit, 1);
unsigned int max_partial_sw_int;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
index 70d9f2cd0b60..2625943d7f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -53,25 +53,29 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper_fpu.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+# Add FPU flags to all dml2 files by default, remove NO_FPU flags.
+# FPU flags step 1: Find all .c files in dal/dc/dml2_0 and it's subfolders
+DML2_ABS_PATH := $(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+DML2_C_FILES := $(shell find $(DML2_ABS_PATH) -name '*.c' -type f)
+
+# FPU flags step 2: Convert to .o and make paths relative to $(AMDDALPATH)/dc/dml2_0/
+DML2_RELATIVE_O_FILES := $(patsubst $(DML2_ABS_PATH)/%,dc/dml2_0/%,$(patsubst %.c,%.o,$(DML2_C_FILES)))
+# FPU flags step 3: Apply FPU flags to all .o files from dal/dc/dml2_0 and it's subfolders
+$(foreach obj,$(DML2_RELATIVE_O_FILES),$(eval CFLAGS_$(AMDDALPATH)/$(obj) := $(dml2_ccflags)))
+$(foreach obj,$(DML2_RELATIVE_O_FILES),$(eval CFLAGS_REMOVE_$(AMDDALPATH)/$(obj) := $(dml2_rcflags)))
+
+# FPU flags step 4: Replace CFLAGS per file for files with additional flags beyond dml2_ccflags and dml2_rcflags
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper_fpu.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_wrapper.o := $(dml2_ccflags)
DML2 = display_mode_core.o display_mode_util.o dml2_wrapper_fpu.o dml2_wrapper.o \
dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
@@ -81,41 +85,6 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
@@ -134,6 +103,7 @@ DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
DML21 += src/dml2_standalone_libraries/lib_float_math.o
DML21 += dml21_translation_helper.o
DML21 += dml21_wrapper.o
+DML21 += dml21_wrapper_fpu.o
DML21 += dml21_utils.o
AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index 847fab508750..2f0e0048bea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -90,7 +90,8 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
- unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ unsigned int hblank_start, vblank_start;
+ uint64_t min_hardware_refresh_in_uhz;
uint32_t pix_clk_100hz;
timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
@@ -105,7 +106,7 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
- timing->interlaced = stream->timing.flags.INTERLACE;
+ timing->interlaced = (stream->timing.flags.INTERLACE != 0);
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
@@ -137,7 +138,11 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
(timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
+ {
+ uint64_t min_refresh = max((uint64_t)stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
+ ASSERT(min_refresh <= ULONG_MAX);
+ timing->drr_config.min_refresh_uhz = (unsigned long)min_refresh;
+ }
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -601,27 +606,33 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->composition.viewport.stationary = false;
- if (plane_state->cm.flags.bits.lut3d_dma_enable) {
+ if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
plane->tdlut.setup_for_tdlut = true;
- switch (plane_state->cm.lut3d_dma.swizzle) {
- case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
- case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
+ switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) {
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_sw_linear;
break;
- case CM_LUT_1D_PACKED_LINEAR:
- default:
+ case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear;
break;
}
- switch (plane_state->cm.lut3d_dma.size) {
- case CM_LUT_SIZE_333333:
+ switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_333333:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
break;
- case CM_LUT_SIZE_171717:
+ // handling when use case and HW support available
+ case DC_CM2_GPU_MEM_SIZE_454545:
+ case DC_CM2_GPU_MEM_SIZE_656565:
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
default:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
+ //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
}
@@ -691,7 +702,7 @@ unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
- return -1;
+ return UINT_MAX;
}
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index ab7ec24268be..4724b08c77e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
@@ -420,8 +420,12 @@ static unsigned int dml21_build_fams2_stream_programming_v2(const struct dc *dc,
type = static_base_state->stream_v1.base.type;
/* get information from context */
- static_base_state->stream_v1.base.num_planes = context->stream_status[dc_stream_idx].plane_count;
- static_base_state->stream_v1.base.otg_inst = context->stream_status[dc_stream_idx].primary_otg_inst;
+ ASSERT(context->stream_status[dc_stream_idx].plane_count >= 0 &&
+ context->stream_status[dc_stream_idx].plane_count <= 0xFF);
+ ASSERT(context->stream_status[dc_stream_idx].primary_otg_inst >= 0 &&
+ context->stream_status[dc_stream_idx].primary_otg_inst <= 0xFF);
+ static_base_state->stream_v1.base.num_planes = (uint8_t)context->stream_status[dc_stream_idx].plane_count;
+ static_base_state->stream_v1.base.otg_inst = (uint8_t)context->stream_status[dc_stream_idx].primary_otg_inst;
/* populate pipe masks for planes */
for (dc_plane_idx = 0; dc_plane_idx < context->stream_status[dc_stream_idx].plane_count; dc_plane_idx++) {
@@ -458,7 +462,9 @@ static unsigned int dml21_build_fams2_stream_programming_v2(const struct dc *dc,
switch (dc->debug.fams_version.minor) {
case 1:
default:
- static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+ ASSERT(phantom_status->primary_otg_inst >= 0 &&
+ phantom_status->primary_otg_inst <= 0xFF);
+ static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = (uint8_t)phantom_status->primary_otg_inst;
/* populate pipe masks for phantom planes */
for (dc_plane_idx = 0; dc_plane_idx < phantom_status->plane_count; dc_plane_idx++) {
@@ -516,7 +522,8 @@ void dml21_build_fams2_programming(const struct dc *dc,
context->bw_ctx.bw.dcn.fams2_global_config.num_streams = num_fams2_streams;
}
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
+ (context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable != 0);
}
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 2623e917ec28..7398f8b69adb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -9,6 +9,10 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#include "dml2_wrapper.h"
+#include "dml2_wrapper_fpu.h"
+#include "dml21_wrapper.h"
+#include "dml21_wrapper_fpu.h"
#include "dc_fpu.h"
#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)
@@ -34,50 +38,18 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming)));
+
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
return true;
}
-static void dml21_populate_configuration_options(const struct dc *in_dc,
- struct dml2_context *dml_ctx,
- const struct dml2_configuration_options *config)
-{
- dml_ctx->config = *config;
-
- /* UCLK P-State options */
- if (in_dc->debug.dml21_force_pstate_method) {
- dml_ctx->config.pmo.force_pstate_method_enable = true;
- for (int i = 0; i < MAX_PIPES; i++)
- dml_ctx->config.pmo.force_pstate_method_values[i] = in_dc->debug.dml21_force_pstate_method_values[i];
- } else {
- dml_ctx->config.pmo.force_pstate_method_enable = false;
- }
-}
-
-static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
-{
-
- dml_ctx->architecture = dml2_architecture_21;
-
- dml21_populate_configuration_options(in_dc, dml_ctx, config);
-
- DC_FP_START();
-
- dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
-
- dml2_initialize_instance(&dml_ctx->v21.dml_init);
-
- DC_FP_END();
-}
-
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
/* Allocate memory for initializing DML21 instance */
- if (!dml21_allocate_memory(dml_ctx)) {
+ if (!dml21_allocate_memory(dml_ctx))
return false;
- }
dml21_init(in_dc, *dml_ctx, config);
@@ -90,337 +62,6 @@ void dml21_destroy(struct dml2_context *dml2)
vfree(dml2->v21.mode_programming.programming);
}
-static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
- struct dml2_context *in_ctx, unsigned int pipe_cnt)
-{
- unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0;
- struct dml2_per_plane_programming *pln_prog = NULL;
- struct dml2_per_stream_programming *stream_prog = NULL;
- struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
- struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
- int num_pipes;
- unsigned int dml_phantom_prog_idx;
-
- context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
-
- /* copy global DCHUBBUB arbiter registers */
- memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.arb_regs, sizeof(struct dml2_display_arb_regs));
-
- /* legacy only */
- context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_regs.arb_regs.compbuf_size * 64;
-
- context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
- context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
- context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
-
- /* phantom's start after main planes */
- dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes;
-
- for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
- pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
-
- if (!pln_prog->plane_descriptor)
- continue;
-
- stream_prog = &in_ctx->v21.mode_programming.programming->stream_programming[pln_prog->plane_descriptor->stream_index];
- num_dpps_required = pln_prog->num_dpps_required;
-
- if (num_dpps_required == 0) {
- continue;
- }
- num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
-
- if (num_pipes <= 0)
- continue;
-
- /* program each pipe */
- for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
- dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog);
-
- if (pln_prog->phantom_plane.valid && dc_phantom_pipes[dc_pipe_index]) {
- dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
- }
- }
-
- /* copy per plane mcache allocation */
- memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
- if (pln_prog->phantom_plane.valid) {
- memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx],
- &pln_prog->phantom_plane.mcache_allocation,
- sizeof(struct dml2_mcache_surface_allocation));
-
- dml_phantom_prog_idx++;
- }
- }
-
- /* assign global clocks */
- context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
- context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
- if (in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values > 1) {
- context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz =
- in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values] * 1000;
- } else {
- context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[0] * 1000;
- }
-
- if (in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values > 1) {
- context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
- in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values] * 1000;
- } else {
- context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[0] * 1000;
- }
-
- /* get global mall allocation */
- if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
- context->bw_ctx.bw.dcn.clk.num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
- } else {
- context->bw_ctx.bw.dcn.clk.num_ways = 0;
- }
-}
-
-static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
-{
- int dc_plane_idx = 0;
- int dml_prog_idx, stream_idx, plane_idx;
- struct dml2_per_plane_programming *pln_prog = NULL;
-
- for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
- for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
- dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
- if (dml_prog_idx == INVALID) {
- continue;
- }
- pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
- mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
- mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
- mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
- mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
- mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
- memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
- pln_prog->mcache_allocation.mcache_x_offsets_plane0,
- sizeof(int) * (DML2_MAX_MCACHES + 1));
- memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
- pln_prog->mcache_allocation.mcache_x_offsets_plane1,
- sizeof(int) * (DML2_MAX_MCACHES + 1));
- dc_plane_idx++;
- }
- }
-}
-
-static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
-{
- bool result = false;
- struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
- struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
-
- memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
- memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
- memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params, 0, sizeof(struct dml2_core_mode_programming_in_out));
-
- if (!context)
- return true;
-
- if (context->stream_count == 0) {
- dml21_init_min_clocks_for_dc_state(dml_ctx, context);
- dml21_build_fams2_programming(in_dc, context, dml_ctx);
- return true;
- }
-
- /* scrub phantom's from current dc_state */
- dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
- dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
-
- /* Populate stream, plane mappings and other fields in display config. */
- result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- if (!result)
- return false;
-
- DC_FP_START();
- result = dml2_build_mode_programming(mode_programming);
- DC_FP_END();
- if (!result)
- return false;
-
- /* Check and map HW resources */
- if (result && !dml_ctx->config.skip_hw_state_mapping) {
- dml21_map_hw_resources(dml_ctx);
- dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
- /* if subvp phantoms are present, expand them into dc context */
- dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
-
- if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
- //Prepare mcache params for each plane based on mcache output from DML
- dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
-
- //populate mcache regs to each pipe
- dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
- }
- }
-
- /* Copy DML CLK, WM and REG outputs to bandwidth context */
- if (result && !dml_ctx->config.skip_hw_state_mapping) {
- dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
- dml21_copy_clocks_to_dc_state(dml_ctx, context);
- dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
- dml21_build_fams2_programming(in_dc, context, dml_ctx);
- }
-
- return true;
-}
-
-static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
-{
- bool is_supported = false;
- struct dml2_initialize_instance_in_out *dml_init = &dml_ctx->v21.dml_init;
- struct dml2_check_mode_supported_in_out *mode_support = &dml_ctx->v21.mode_support;
-
- memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
- memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
- memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.check_mode_supported_locals.mode_support_params, 0, sizeof(struct dml2_core_mode_support_in_out));
-
- if (!context || context->stream_count == 0)
- return true;
-
- /* Scrub phantom's from current dc_state */
- dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
- dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
-
- mode_support->dml2_instance = dml_init->dml2_instance;
- dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
- DC_FP_START();
- is_supported = dml2_check_mode_supported(mode_support);
- DC_FP_END();
- if (!is_supported)
- return false;
-
- return true;
-}
-
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
- enum dc_validate_mode validate_mode)
-{
- bool out = false;
-
- /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */
- if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
- out = dml21_check_mode_support(in_dc, context, dml_ctx);
- else
- out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
-
- return out;
-}
-
-void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
-{
- unsigned int dml_prog_idx, dml_phantom_prog_idx, dc_pipe_index;
- int num_pipes;
- struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
- struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
-
- struct dml2_per_plane_programming *pln_prog = NULL;
- struct dml2_plane_mcache_configuration_descriptor *mcache_config = NULL;
- struct prepare_mcache_programming_locals *l = &dml_ctx->v21.scratch.prepare_mcache_locals;
-
- if (context->stream_count == 0) {
- return;
- }
-
- memset(&l->build_mcache_programming_params, 0, sizeof(struct dml2_build_mcache_programming_in_out));
- l->build_mcache_programming_params.dml2_instance = dml_ctx->v21.dml_init.dml2_instance;
-
- /* phantom's start after main planes */
- dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes;
-
- /* Build mcache programming parameters per plane per pipe */
- for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
- pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
-
- mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_prog_idx];
- memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
- mcache_config->plane_descriptor = pln_prog->plane_descriptor;
- mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx];
- mcache_config->num_pipes = pln_prog->num_dpps_required;
- l->build_mcache_programming_params.num_configurations++;
-
- if (pln_prog->num_dpps_required == 0) {
- continue;
- }
-
- num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
- if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL ||
- dc_main_pipes[0]->plane_state == NULL)
- continue;
-
- /* get config for each pipe */
- for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
- ASSERT(dc_main_pipes[dc_pipe_index]);
- dml21_get_pipe_mcache_config(context, dc_main_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
- }
-
- /* get config for each phantom pipe */
- if (pln_prog->phantom_plane.valid &&
- dc_phantom_pipes[0] &&
- dc_main_pipes[0]->stream &&
- dc_phantom_pipes[0]->plane_state) {
- mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx];
- memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
- mcache_config->plane_descriptor = pln_prog->plane_descriptor;
- mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx];
- mcache_config->num_pipes = pln_prog->num_dpps_required;
- l->build_mcache_programming_params.num_configurations++;
-
- for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
- ASSERT(dc_phantom_pipes[dc_pipe_index]);
- dml21_get_pipe_mcache_config(context, dc_phantom_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
- }
-
- /* increment phantom index */
- dml_phantom_prog_idx++;
- }
- }
-
- /* Call to generate mcache programming per plane per pipe for the given display configuration */
- dml2_build_mcache_programming(&l->build_mcache_programming_params);
-
- /* get per plane per pipe mcache programming */
- for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
- pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
-
- num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
- if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL ||
- dc_main_pipes[0]->plane_state == NULL)
- continue;
-
- /* get config for each pipe */
- for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
- ASSERT(dc_main_pipes[dc_pipe_index]);
- if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index]) {
- memcpy(&dc_main_pipes[dc_pipe_index]->mcache_regs,
- l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index],
- sizeof(struct dml2_hubp_pipe_mcache_regs));
- }
- }
-
- /* get config for each phantom pipe */
- if (pln_prog->phantom_plane.valid &&
- dc_phantom_pipes[0] &&
- dc_main_pipes[0]->stream &&
- dc_phantom_pipes[0]->plane_state) {
- for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
- ASSERT(dc_phantom_pipes[dc_pipe_index]);
- if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) {
- memcpy(&dc_phantom_pipes[dc_pipe_index]->mcache_regs,
- l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index],
- sizeof(struct dml2_hubp_pipe_mcache_regs));
- }
- }
- /* increment phantom index */
- dml_phantom_prog_idx++;
- }
- }
-}
-
void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx)
{
@@ -446,12 +87,8 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
- DC_FP_START();
-
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
-
- DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
@@ -466,8 +103,3 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
-{
- dml21_init(in_dc, dml_ctx, config);
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
index b508bbcc0e16..c4813c51251b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
@@ -34,36 +34,6 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx);
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
-
-/**
- * dml21_validate - Determines if a display configuration is supported or not.
- * @in_dc: dc.
- * @context: dc_state to be validated.
- * @dml_ctx: dml21 context.
- * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
- * will not populate context.res_ctx.
- *
- * Based on fast_validate option internally would call:
- *
- * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
- * Calculates if dc_state can be supported on the input display
- * configuration. If supported, generates the necessary HW
- * programming for the new dc_state.
- *
- * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option
- * Calculates if dc_state can be supported for the input display
- * config.
- *
- * Context: Two threads may not invoke this function concurrently unless they reference
- * separate dc_states for validation.
- * Return: True if mode is supported, false otherwise.
- */
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
- enum dc_validate_mode validate_mode);
-
-/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
-void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
/* Structure for inputting external SOCBB and DCNIP values for tool based debugging. */
struct socbb_ip_params_external {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c
new file mode 100644
index 000000000000..cc992af6ac9c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#include "dml2_internal_types.h"
+#include "dml_top.h"
+#include "dml2_core_dcn4_calcs.h"
+#include "dml2_internal_shared_types.h"
+#include "dml21_utils.h"
+#include "dml21_translation_helper.h"
+#include "dml2_dc_resource_mgmt.h"
+#include "dml2_wrapper.h"
+#include "dml2_wrapper_fpu.h"
+#include "dml21_wrapper.h"
+#include "dml21_wrapper_fpu.h"
+
+#define INVALID -1
+
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
+{
+ dml_ctx->config = *config;
+
+ /* UCLK P-State options */
+ if (in_dc->debug.dml21_force_pstate_method) {
+ dml_ctx->config.pmo.force_pstate_method_enable = true;
+ for (int i = 0; i < MAX_PIPES; i++)
+ dml_ctx->config.pmo.force_pstate_method_values[i] = in_dc->debug.dml21_force_pstate_method_values[i];
+ } else {
+ dml_ctx->config.pmo.force_pstate_method_enable = false;
+ }
+}
+
+void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+{
+ dml_ctx->architecture = dml2_architecture_21;
+
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
+
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
+
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
+}
+
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+{
+ dml21_init(in_dc, dml_ctx, config);
+}
+
+static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
+ struct dml2_context *in_ctx, unsigned int pipe_cnt)
+{
+ unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+ struct dml2_per_stream_programming *stream_prog = NULL;
+ struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
+ struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
+ int num_pipes;
+ unsigned int dml_phantom_prog_idx;
+
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+
+ /* copy global DCHUBBUB arbiter registers */
+ memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.arb_regs, sizeof(struct dml2_display_arb_regs));
+
+ /* legacy only */
+ context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_regs.arb_regs.compbuf_size * 64;
+
+ context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+
+ /* phantoms start after main planes */
+ dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes;
+
+ for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
+ pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+
+ if (!pln_prog->plane_descriptor)
+ continue;
+
+ stream_prog = &in_ctx->v21.mode_programming.programming->stream_programming[pln_prog->plane_descriptor->stream_index];
+ num_dpps_required = pln_prog->num_dpps_required;
+
+ if (num_dpps_required == 0) {
+ continue;
+ }
+ num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
+
+ if (num_pipes <= 0)
+ continue;
+
+ /* program each pipe */
+ for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
+ dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog);
+
+ if (pln_prog->phantom_plane.valid && dc_phantom_pipes[dc_pipe_index]) {
+ dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
+ }
+ }
+
+ /* copy per plane mcache allocation */
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+ if (pln_prog->phantom_plane.valid) {
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx],
+ &pln_prog->phantom_plane.mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+
+ dml_phantom_prog_idx++;
+ }
+ }
+
+ /* assign global clocks */
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+ if (in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values > 1) {
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz =
+ in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values] * 1000;
+ } else {
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[0] * 1000;
+ }
+
+ if (in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values > 1) {
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
+ in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values] * 1000;
+ } else {
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[0] * 1000;
+ }
+
+ /* get global mall allocation */
+ if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ context->bw_ctx.bw.dcn.clk.num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
+ } else {
+ context->bw_ctx.bw.dcn.clk.num_ways = 0;
+ }
+}
+
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
+static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
+{
+ bool is_supported = false;
+ struct dml2_initialize_instance_in_out *dml_init = &dml_ctx->v21.dml_init;
+ struct dml2_check_mode_supported_in_out *mode_support = &dml_ctx->v21.mode_support;
+
+ memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
+ memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
+ memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.check_mode_supported_locals.mode_support_params, 0, sizeof(struct dml2_core_mode_support_in_out));
+
+ if (!context || context->stream_count == 0)
+ return true;
+
+ /* Scrub phantoms from current dc_state */
+ dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
+ dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
+
+ mode_support->dml2_instance = dml_init->dml2_instance;
+ dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+
+ is_supported = dml2_check_mode_supported(mode_support);
+
+ if (!is_supported)
+ return false;
+
+ return true;
+}
+
+static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
+{
+ bool result = false;
+ struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
+
+ memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
+ memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
+ memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params, 0, sizeof(struct dml2_core_mode_programming_in_out));
+
+ if (!context)
+ return true;
+
+ if (context->stream_count == 0) {
+ dml21_init_min_clocks_for_dc_state(dml_ctx, context);
+ dml21_build_fams2_programming(in_dc, context, dml_ctx);
+ return true;
+ }
+
+ /* scrub phantoms from current dc_state */
+ dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
+ dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
+
+ /* Populate stream, plane mappings and other fields in display config. */
+ result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ if (!result)
+ return false;
+
+ result = dml2_build_mode_programming(mode_programming);
+
+ if (!result)
+ return false;
+
+ /* Check and map HW resources */
+ if (result && !dml_ctx->config.skip_hw_state_mapping) {
+ dml21_map_hw_resources(dml_ctx);
+ dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
+ /* if subvp phantoms are present, expand them into dc context */
+ dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+ //Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+ //populate mcache regs to each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
+ }
+
+ /* Copy DML CLK, WM and REG outputs to bandwidth context */
+ if (result && !dml_ctx->config.skip_hw_state_mapping) {
+ dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
+ dml21_copy_clocks_to_dc_state(dml_ctx, context);
+ dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
+ dml21_build_fams2_programming(in_dc, context, dml_ctx);
+ }
+
+ return true;
+}
+
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
+{
+ bool out = false;
+
+ /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml21_check_mode_support(in_dc, context, dml_ctx);
+ else
+ out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
+ return out;
+}
+
+void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
+{
+ unsigned int dml_prog_idx, dml_phantom_prog_idx, dc_pipe_index;
+ int num_pipes;
+ struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
+ struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
+
+ struct dml2_per_plane_programming *pln_prog = NULL;
+ struct dml2_plane_mcache_configuration_descriptor *mcache_config = NULL;
+ struct prepare_mcache_programming_locals *l = &dml_ctx->v21.scratch.prepare_mcache_locals;
+
+ if (context->stream_count == 0) {
+ return;
+ }
+
+ memset(&l->build_mcache_programming_params, 0, sizeof(struct dml2_build_mcache_programming_in_out));
+ l->build_mcache_programming_params.dml2_instance = dml_ctx->v21.dml_init.dml2_instance;
+
+ /* phantoms start after main planes */
+ dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes;
+
+ /* Build mcache programming parameters per plane per pipe */
+ for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+
+ mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_prog_idx];
+ memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
+ mcache_config->plane_descriptor = pln_prog->plane_descriptor;
+ mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx];
+ ASSERT(pln_prog->num_dpps_required <= 0x7F);
+ mcache_config->num_pipes = (char)pln_prog->num_dpps_required;
+ l->build_mcache_programming_params.num_configurations++;
+
+ if (pln_prog->num_dpps_required == 0) {
+ continue;
+ }
+
+ num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
+ if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL ||
+ dc_main_pipes[0]->plane_state == NULL)
+ continue;
+
+ /* get config for each pipe */
+ for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
+ ASSERT(dc_main_pipes[dc_pipe_index]);
+ dml21_get_pipe_mcache_config(context, dc_main_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
+ }
+
+ /* get config for each phantom pipe */
+ if (pln_prog->phantom_plane.valid &&
+ dc_phantom_pipes[0] &&
+ dc_main_pipes[0]->stream &&
+ dc_phantom_pipes[0]->plane_state) {
+ mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx];
+ memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
+ mcache_config->plane_descriptor = pln_prog->plane_descriptor;
+ mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx];
+ ASSERT(pln_prog->num_dpps_required <= 0x7F);
+ mcache_config->num_pipes = (char)pln_prog->num_dpps_required;
+ l->build_mcache_programming_params.num_configurations++;
+
+ for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
+ ASSERT(dc_phantom_pipes[dc_pipe_index]);
+ dml21_get_pipe_mcache_config(context, dc_phantom_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
+ }
+
+ /* increment phantom index */
+ dml_phantom_prog_idx++;
+ }
+ }
+
+ /* Call to generate mcache programming per plane per pipe for the given display configuration */
+ dml2_build_mcache_programming(&l->build_mcache_programming_params);
+
+ /* get per plane per pipe mcache programming */
+ for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+
+ num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
+ if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL ||
+ dc_main_pipes[0]->plane_state == NULL)
+ continue;
+
+ /* get config for each pipe */
+ for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
+ ASSERT(dc_main_pipes[dc_pipe_index]);
+ if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index]) {
+ memcpy(&dc_main_pipes[dc_pipe_index]->mcache_regs,
+ l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index],
+ sizeof(struct dml2_hubp_pipe_mcache_regs));
+ }
+ }
+
+ /* get config for each phantom pipe */
+ if (pln_prog->phantom_plane.valid &&
+ dc_phantom_pipes[0] &&
+ dc_main_pipes[0]->stream &&
+ dc_phantom_pipes[0]->plane_state) {
+ for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
+ ASSERT(dc_phantom_pipes[dc_pipe_index]);
+ if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) {
+ memcpy(&dc_phantom_pipes[dc_pipe_index]->mcache_regs,
+ l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index],
+ sizeof(struct dml2_hubp_pipe_mcache_regs));
+ }
+ }
+ /* increment phantom index */
+ dml_phantom_prog_idx++;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.h
new file mode 100644
index 000000000000..e5d9a456645f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#ifndef _DML21_WRAPPER_FPU_H_
+#define _DML21_WRAPPER_FPU_H_
+
+#include "os_types.h"
+#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
+
+struct dc;
+struct dc_state;
+struct dml2_configuration_options;
+struct dml2_context;
+enum dc_validate_mode;
+
+/**
+ * dml21_init - Initialize DML21 context
+ * @in_dc: dc.
+ * @dml_ctx: DML21 context to initialize.
+ * @config: dml21 configuration options.
+ *
+ * Performs FPU-requiring initialization. Must be called with FPU protection.
+ */
+void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
+
+/**
+ * dml21_validate - Determines if a display configuration is supported or not.
+ * @in_dc: dc.
+ * @context: dc_state to be validated.
+ * @dml_ctx: dml21 context.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
+ *
+ * Based on the validate_mode option, internally calls:
+ *
+ * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
+ * Calculates if dc_state can be supported on the input display
+ * configuration. If supported, generates the necessary HW
+ * programming for the new dc_state.
+ *
+ * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option
+ * Calculates if dc_state can be supported for the input display
+ * config.
+ *
+ * Context: Two threads may not invoke this function concurrently unless they reference
+ * separate dc_states for validation.
+ * Return: True if mode is supported, false otherwise.
+ */
+
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config);
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
+
+/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
+void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
+
+#endif /* _DML21_WRAPPER_FPU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index d17e59d684fd..ab0b4a4b5d65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -279,6 +279,26 @@ static bool round_up_and_copy_to_next_dpm(unsigned long min_value, unsigned long
bool result = false;
int index = 0;
+ /* Guard against empty clock tables (e.g. DTBCLK on DCN42B where the
+ * clock is tied off and num_clk_values == 0). Without this check the
+ * else-if branch below would evaluate
+ * clk_values_khz[num_clk_values - 1] with num_clk_values == 0, which
+ * wraps the unsigned char index to 255 — a 235-element out-of-bounds
+ * read on an array of DML_MAX_CLK_TABLE_SIZE (20) entries.
+ *
+ * Semantic: if the clock doesn't exist on this ASIC but no frequency
+ * is required (min_value == 0), the request is trivially satisfied.
+ * If a non-zero frequency is required but the clock is absent, the
+ * configuration is unsupportable.
+ */
+ if (clock_table->num_clk_values == 0) {
+ if (min_value == 0) {
+ *rounded_value = 0;
+ return true;
+ }
+ return false;
+ }
+
if (clock_table->num_clk_values > 2) {
while (index < clock_table->num_clk_values && clock_table->clk_values_khz[index] < min_value)
index++;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index a265f254152c..eab13e1c96fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -178,9 +178,12 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1];
min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1];
- min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1];
- min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
- min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_clocks_khz.dscclk = (soc_bb->clk_table.dscclk.num_clk_values > 0) ?
+ soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1] : 0;
+ min_table->max_clocks_khz.dtbclk = (soc_bb->clk_table.dtbclk.num_clk_values > 0) ?
+ soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1] : 0;
+ min_table->max_clocks_khz.phyclk = (soc_bb->clk_table.phyclk.num_clk_values > 0) ?
+ soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1] : 0;
min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
index 1f67cbc2c236..3eaeff39ee79 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
@@ -54,9 +54,12 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1];
min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1];
- min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1];
- min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
- min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_clocks_khz.dscclk = (soc_bb->clk_table.dscclk.num_clk_values > 0) ?
+ soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1] : 0;
+ min_table->max_clocks_khz.dtbclk = (soc_bb->clk_table.dtbclk.num_clk_values > 0) ?
+ soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1] : 0;
+ min_table->max_clocks_khz.phyclk = (soc_bb->clk_table.phyclk.num_clk_values > 0) ?
+ soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1] : 0;
min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index fd3c61509f1b..6ef93c6fc1cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
@@ -35,7 +35,7 @@
#define MAX_MPCC_FACTOR 4
struct dc_plane_pipe_pool {
- int pipes_assigned_to_plane[MAX_ODM_FACTOR][MAX_MPCC_FACTOR];
+ unsigned int pipes_assigned_to_plane[MAX_ODM_FACTOR][MAX_MPCC_FACTOR];
bool pipe_used[MAX_ODM_FACTOR][MAX_MPCC_FACTOR];
int num_pipes_assigned_to_plane_for_mpcc_combine;
int num_pipes_assigned_to_plane_for_odm_combine;
@@ -340,8 +340,8 @@ static bool is_pipe_in_candidate_array(const unsigned int pipe_idx,
static bool find_more_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state, // The state we want to find a free mapping in
unsigned int stream_id, // The stream we want this pipe to drive
- int *assigned_pipes,
- int *assigned_pipe_count,
+ unsigned int *assigned_pipes,
+ unsigned int *assigned_pipe_count,
int pipes_needed,
const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to
{
@@ -366,7 +366,8 @@ static bool find_more_pipes_for_stream(struct dml2_context *ctx,
if (!is_plane_using_pipe(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = preferred_pipe_candidates[i];
+ ASSERT(preferred_pipe_candidates[i] <= 0xFF);
+ pipe->pipe_idx = (uint8_t)preferred_pipe_candidates[i];
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
@@ -382,7 +383,8 @@ static bool find_more_pipes_for_stream(struct dml2_context *ctx,
if (!is_plane_using_pipe(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = i;
+ ASSERT(i >= 0 && i <= 0xFF);
+ pipe->pipe_idx = (uint8_t)i;
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
@@ -393,7 +395,8 @@ static bool find_more_pipes_for_stream(struct dml2_context *ctx,
if (!is_plane_using_pipe(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = last_resort_pipe_candidates[i];
+ ASSERT(last_resort_pipe_candidates[i] <= 0xFF);
+ pipe->pipe_idx = (uint8_t)last_resort_pipe_candidates[i];
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
@@ -406,8 +409,8 @@ static bool find_more_pipes_for_stream(struct dml2_context *ctx,
static bool find_more_free_pipes(struct dml2_context *ctx,
struct dc_state *state, // The state we want to find a free mapping in
unsigned int stream_id, // The stream we want this pipe to drive
- int *assigned_pipes,
- int *assigned_pipe_count,
+ unsigned int *assigned_pipes,
+ unsigned int *assigned_pipe_count,
int pipes_needed,
const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to
{
@@ -432,7 +435,8 @@ static bool find_more_free_pipes(struct dml2_context *ctx,
if (is_pipe_free(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = preferred_pipe_candidates[i];
+ ASSERT(preferred_pipe_candidates[i] <= 0xFF);
+ pipe->pipe_idx = (uint8_t)preferred_pipe_candidates[i];
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
@@ -448,7 +452,8 @@ static bool find_more_free_pipes(struct dml2_context *ctx,
if (is_pipe_free(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = i;
+ ASSERT(i >= 0 && i <= 0xFF);
+ pipe->pipe_idx = (uint8_t)i;
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
@@ -459,7 +464,8 @@ static bool find_more_free_pipes(struct dml2_context *ctx,
if (is_pipe_free(pipe)) {
pipes_needed--;
// TODO: This doens't make sense really, pipe_idx should always be valid
- pipe->pipe_idx = last_resort_pipe_candidates[i];
+ ASSERT(last_resort_pipe_candidates[i] <= 0xFF);
+ pipe->pipe_idx = (uint8_t)last_resort_pipe_candidates[i];
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index d56e58ce26c7..9bbe4e058be7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -555,7 +555,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
- vblank_index = i;
+ ASSERT(i <= 0xFF);
+ vblank_index = (uint8_t)i;
found = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index e25b88f2d6b9..cf3a69aba638 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -330,8 +330,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
{
struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch;
struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params;
- unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
- unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0};
+ int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
+ int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0};
unsigned int dml_project = dml2->v20.dml_core_ctx.project;
unsigned int i = 0;
@@ -765,7 +765,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
out->PixelClock[location] *= 2;
out->HTotal[location] = in->timing.h_total;
out->VTotal[location] = in->timing.v_total;
- out->Interlace[location] = in->timing.flags.INTERLACE;
+ out->Interlace[location] = (in->timing.flags.INTERLACE != 0);
hblank_start = in->timing.h_total - in->timing.h_front_porch;
out->HBlankEnd[location] = hblank_start
- in->timing.h_addressable
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..6c7cdf102906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
@@ -255,7 +255,8 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str
pipe_ctx->pipe_dlg_param.vupdate_width = dml_get_vupdate_width(mode_lib, pipe_idx);
pipe_ctx->pipe_dlg_param.vready_offset = dml_get_vready_offset(mode_lib, pipe_idx);
- pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
+ ASSERT(pipe_ctx->stream_res.tg->inst >= 0 && pipe_ctx->stream_res.tg->inst <= 0xFF);
+ pipe_ctx->pipe_dlg_param.otg_inst = (unsigned char)pipe_ctx->stream_res.tg->inst;
pipe_ctx->pipe_dlg_param.hactive = hactive;
pipe_ctx->pipe_dlg_param.vactive = vactive;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 408559d6fb2d..93b7613fc4f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -6,8 +6,24 @@
*/
#include "dml2_internal_types.h"
+#include "dml2_wrapper.h"
#include "dml2_wrapper_fpu.h"
+#include "dml21_wrapper.h"
+#include "dml21_wrapper_fpu.h"
+#include "dc_fpu.h"
+
+#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !DC_RUN_WITH_PREEMPTION_ENABLED
+
+struct dml2_context *dml2_allocate_memory(void)
+{
+ struct dml2_context *dml2;
+
+ DC_RUN_WITH_PREEMPTION_ENABLED(dml2 = vzalloc(sizeof(struct dml2_context)));
+ return dml2;
+}
bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
enum dc_validate_mode validate_mode)
{
@@ -23,16 +39,12 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
return out;
}
- DC_FP_START();
-
/* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */
if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml2_validate_only(context, validate_mode);
else
out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
- DC_FP_END();
-
return out;
}
@@ -70,15 +82,12 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
- DC_FP_START();
-
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
- DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c
index 203eef747262..66624cfc27b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c
@@ -31,8 +31,10 @@
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"
-#include "dml21_wrapper.h"
+#include "dml2_wrapper.h"
#include "dml2_wrapper_fpu.h"
+#include "dml21_wrapper.h"
+#include "dml21_wrapper_fpu.h"
void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
@@ -546,11 +548,6 @@ void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
}
}
-inline struct dml2_context *dml2_allocate_memory(void)
-{
- return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
-}
-
void dml2_destroy(struct dml2_context *dml2)
{
if (!dml2)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 0e70ffc784b1..ef605e0a75e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -288,6 +288,7 @@ void dpp1_cnv_setup (
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
{
+ (void)alpha_2bit_lut;
uint32_t pixel_format;
uint32_t alpha_en;
enum pixel_format_description fmt ;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
index c433f4b876e9..8d5000790904 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
@@ -92,7 +92,10 @@ void dpp2_power_on_obuf(
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
const struct dc_gamma *gamma)
-{}
+{
+ (void)dpp_base;
+ (void)gamma;
+}
static void dpp2_cnv_setup (
struct dpp *dpp_base,
@@ -369,7 +372,11 @@ void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
enum opp_regamma mode)
-{}
+{
+ (void)dpp;
+ (void)params;
+ (void)mode;
+}
static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
index 31613372e214..26f9485f165d 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
@@ -1016,6 +1016,7 @@ static void dpp20_set_3dlut_mode(
bool is_color_channel_12bits,
bool is_lut_size17x17x17)
{
+ (void)is_color_channel_12bits;
uint32_t lut_mode;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 8a146968ee15..e7880fc61b4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -1307,6 +1307,7 @@ static void dpp3_set_3dlut_mode(
bool is_color_channel_12bits,
bool is_lut_size17x17x17)
{
+ (void)is_color_channel_12bits;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 3284084ca7ad..8170a86ad0ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -80,6 +80,7 @@ static void dpp3_program_gammcor_lut(
uint32_t num,
bool is_ram_a)
{
+ (void)is_ram_a;
uint32_t i;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 62bf7cea21d8..821d5173b59f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -132,6 +132,9 @@ void dpp401_set_cursor_position(
uint32_t width,
uint32_t height)
{
+ (void)param;
+ (void)width;
+ (void)height;
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t cur_en = pos->enable ? 1 : 0;
@@ -237,6 +240,8 @@ void dpp401_set_cursor_matrix(
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix)
{
+ (void)color_space;
+ (void)cursor_csc_color_matrix;
//Since we don't have cursor matrix information, force bypass mode by passing in unknown color space
dpp401_program_cursor_csc(dpp_base, COLOR_SPACE_UNKNOWN, NULL);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 8dfb6dd14eb2..5b3584ad5b6b 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -680,6 +680,9 @@ static void get_dsc_enc_caps(
} else {
build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -1097,14 +1100,13 @@ static bool setup_dsc_config(
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR422:
- if (policy.ycbcr422_simple) {
+ is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
+ sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
+ branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
+ if (!is_dsc_possible) {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422;
dsc_cfg->ycbcr422_simple = is_dsc_possible;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
- } else {
- is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
- sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
- branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
}
break;
case PIXEL_ENCODING_YCBCR420:
@@ -1404,7 +1406,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->min_target_bpp = 8;
/* DP specs limits to 3 x bpc */
policy->max_target_bpp = 3 * bpc;
- policy->ycbcr422_simple = true;
break;
case PIXEL_ENCODING_YCBCR420:
/* DP specs limits to 6 */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 6e1e759462bf..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -100,7 +100,7 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 17acb64a9d80..e712985f7abd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -128,7 +128,7 @@ void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int m
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index bbb8b5b18a4e..3bf737195bac 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -78,7 +78,7 @@ static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsign
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
@@ -107,6 +107,11 @@ void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_sta
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
+ REG_GET(DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, &s->dsc_block_pred_enable);
+ REG_GET(DSCC_PPS_CONFIG0, LINEBUF_DEPTH, &s->dsc_line_buf_depth);
+ REG_GET(DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, &s->dsc_version_minor);
+ REG_GET(DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, &s->dsc_rc_buffer_size);
+ REG_GET(DSCC_PPS_CONFIG0, SIMPLE_422, &s->dsc_simple_422);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index ad7ef83694ea..a16c60d8532f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -64,6 +64,11 @@ struct dcn_dsc_state {
uint32_t dsc_chunk_size;
uint32_t dsc_fw_en;
uint32_t dsc_opp_source;
+ uint32_t dsc_block_pred_enable;
+ uint32_t dsc_line_buf_depth;
+ uint32_t dsc_version_minor;
+ uint32_t dsc_rc_buffer_size;
+ uint32_t dsc_simple_422;
};
struct dcn_dsc_reg_state {
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index bc058f682438..0ee4f83a02eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
@@ -45,6 +45,7 @@
static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
{
+ (void)dwbc;
if (caps) {
caps->adapter_id = 0; /* we only support 1 adapter currently */
caps->hw_version = DCN_VERSION_3_0;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c
index dcbcf6b85abf..e7e1d9979876 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c
@@ -45,6 +45,7 @@ static bool offset_to_id(
enum gpio_id *id,
uint32_t *en)
{
+ (void)mask;
switch (offset) {
/* HPD */
case REG(HPD0_DC_HPD_INT_STATUS):
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index f3d562c8df4c..d81a71ac00d2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -60,6 +60,7 @@ bool dal_hw_factory_init(
enum dce_version dce_version,
enum dce_environment dce_environment)
{
+ (void)dce_environment;
switch (dce_version) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case DCE_VERSION_6_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
index 660510842ecf..f0d400972897 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
@@ -199,5 +199,6 @@ void dal_hw_gpio_construct(
void dal_hw_gpio_destruct(
struct hw_gpio *pin)
{
+ (void)pin;
ASSERT(!pin->base.opened);
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 1c977fc4d0e3..e6e36a912b13 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -64,6 +64,7 @@ bool dal_hw_translate_init(
enum dce_version dce_version,
enum dce_environment dce_environment)
{
+ (void)dce_environment;
switch (dce_version) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case DCE_VERSION_6_0:
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 5a03758e3de6..3c298192f359 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -943,6 +943,7 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz)
{
+ (void)dccg_ref_freq_inKhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t ref_div = 0;
uint32_t ref_en = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 43ba399f4822..82d4e3e0e5e8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -259,6 +259,7 @@ void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz)
{
+ (void)dccg_ref_freq_inKhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t ref_div = 0;
uint32_t ref_en = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index b0a4b68cf359..3b9542c08f3d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -70,6 +70,7 @@ bool hubbub401_program_urgent_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ (void)refclk_mhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
@@ -188,6 +189,7 @@ bool hubbub401_program_stutter_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ (void)refclk_mhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
@@ -287,6 +289,7 @@ bool hubbub401_program_pstate_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ (void)refclk_mhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
@@ -414,6 +417,7 @@ bool hubbub401_program_usr_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ (void)refclk_mhz;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
index a436fa71d4b4..73b6b0ffcb74 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
@@ -488,6 +488,8 @@ static bool hubbub42_program_watermarks(
static void hubbub42_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
+ (void)memory_channel_count;
+ (void)words_per_channel;
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t request_limit = 96; //MAX(12 * memory_channel_count, 96);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 6378e3fd7249..7c97a774141f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -143,6 +143,7 @@ void hubp1_program_tiling(
const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ (void)pixel_format;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
@@ -563,6 +564,7 @@ void hubp1_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size(hubp, format, plane_size, dcc);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 92288de4cc10..ceee5165fd6a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -313,6 +313,7 @@ static void hubp2_program_tiling(
const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ (void)pixel_format;
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
@@ -557,6 +558,7 @@ void hubp2_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0cc6f4558989..e2708e30eb1b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -321,6 +321,7 @@ void hubp3_program_tiling(
const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ (void)pixel_format;
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
@@ -418,6 +419,7 @@ void hubp3_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp3_dcc_control_sienna_cichlid(hubp, dcc);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 79c583e258c7..c879f4901c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -179,6 +179,7 @@ void hubp35_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp3_dcc_control_sienna_cichlid(hubp, dcc);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 263e0c4d34f6..5a816442deee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -657,6 +657,7 @@ void hubp401_program_tiling(
const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ (void)pixel_format;
/* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x
* All 4 fields NUM_PIPES, PIPE_INTERLEAVE, MAX_COMPRESSED_FRAGS and NUM_PKRS are irrelevant.
*
@@ -671,6 +672,7 @@ void hubp401_program_size(
const struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc)
{
+ (void)dcc;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t pitch, pitch_c;
bool use_pitch_c = false;
@@ -709,6 +711,7 @@ void hubp401_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp401_dcc_control(hubp, dcc);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
index d85a4ab957a4..ad6badcceb12 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
@@ -301,6 +301,7 @@ static void hubp42_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level)
{
+ (void)compat_level;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp3_dcc_control_sienna_cichlid(hubp, dcc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 8a17cc036399..5273ca09fe12 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -201,6 +201,8 @@ static void enable_display_pipe_clock_gating(
struct dc_context *ctx,
bool clock_gating)
{
+ (void)ctx;
+ (void)clock_gating;
/*TODO*/
}
@@ -284,6 +286,7 @@ static bool
dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
+ (void)dc;
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
const struct dc_transfer_func *tf = NULL;
struct ipp_prescale_params prescale_params = { 0 };
@@ -488,15 +491,15 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
seg_distr[8] = 4;
seg_distr[9] = 4;
seg_distr[10] = 0;
- seg_distr[11] = -1;
- seg_distr[12] = -1;
- seg_distr[13] = -1;
- seg_distr[14] = -1;
- seg_distr[15] = -1;
+ seg_distr[11] = (uint32_t)-1;
+ seg_distr[12] = (uint32_t)-1;
+ seg_distr[13] = (uint32_t)-1;
+ seg_distr[14] = (uint32_t)-1;
+ seg_distr[15] = (uint32_t)-1;
}
for (k = 0; k < 16; k++) {
- if (seg_distr[k] != -1)
+ if (seg_distr[k] != (uint32_t)-1)
hw_points += (1 << seg_distr[k]);
}
@@ -607,6 +610,7 @@ static bool
dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
+ (void)dc;
struct transform *xfm = pipe_ctx->plane_res.xfm;
xfm->funcs->opp_power_on_regamma_lut(xfm, true);
@@ -1539,6 +1543,7 @@ static enum dc_status dce110_enable_stream_timing(
struct dc_state *context,
struct dc *dc)
{
+ (void)context;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
pipe_ctx[pipe_ctx->pipe_idx];
@@ -1568,7 +1573,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ if (dc_is_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
@@ -1986,7 +1991,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe_ctx = NULL;
struct dce_hwseq *hws = dc->hwseq;
int edp_with_sink_num;
- int edp_num;
+ unsigned int edp_num;
int edp_stream_num;
int i;
bool can_apply_edp_fast_boot = false;
@@ -2418,7 +2423,7 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
- if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal))
+ if (dc_is_tmds_signal(pipe_ctx_old->stream->signal))
pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
@@ -2728,6 +2733,7 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
static void update_plane_addr(const struct dc *dc,
struct pipe_ctx *pipe_ctx)
{
+ (void)dc;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
if (plane_state == NULL)
@@ -2814,6 +2820,8 @@ static void dce110_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
+ (void)state;
+ (void)group_index;
struct dcp_gsl_params gsl_params = { 0 };
int i;
DC_LOGGER_INIT(dc->ctx);
@@ -2889,6 +2897,8 @@ static void dce110_enable_per_frame_crtc_position_reset(
static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
+ (void)context;
+ (void)dc;
// Do nothing
}
@@ -3154,10 +3164,13 @@ static void dce110_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
+ (void)context;
}
static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
+ (void)state;
struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
@@ -3178,6 +3191,9 @@ static void dce110_wait_for_mpcc_disconnect(
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx)
{
+ (void)dc;
+ (void)res_pool;
+ (void)pipe_ctx;
/* do nothing*/
}
@@ -3187,6 +3203,10 @@ static void program_output_csc(struct dc *dc,
uint16_t *matrix,
int opp_id)
{
+ (void)dc;
+ (void)colorspace;
+ (void)matrix;
+ (void)opp_id;
int i;
struct out_csc_color_matrix tbl_entry;
@@ -3331,6 +3351,7 @@ void dce110_enable_lvds_link_output(struct dc_link *link,
enum clock_source_id clock_source,
uint32_t pixel_clock)
{
+ (void)link_res;
link->link_enc->funcs->enable_lvds_output(
link->link_enc,
clock_source,
@@ -3345,6 +3366,7 @@ void dce110_enable_tmds_link_output(struct dc_link *link,
enum dc_color_depth color_depth,
uint32_t pixel_clock)
{
+ (void)link_res;
link->link_enc->funcs->enable_tmds_output(
link->link_enc,
clock_source,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
index 2a62f63d0357..0689bbf12ad8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
@@ -154,6 +154,10 @@ static bool dce120_enable_display_power_gating(
struct dc_bios *dcb,
enum pipe_gating_control power_gating)
{
+ (void)dc;
+ (void)controller_id;
+ (void)dcb;
+ (void)power_gating;
/* disable for bringup */
#if 0
enum bp_result bp_result = BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 17ff66d9a617..756ce8379538 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -86,6 +86,7 @@ static void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
+ (void)log_ctx;
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
@@ -252,6 +253,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
static void log_mpc_crc(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
+ (void)log_ctx;
struct dc_context *dc_ctx = dc->ctx;
struct dce_hwseq *hws = dc->hwseq;
@@ -450,6 +452,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
static void dcn10_log_color_state(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
+ (void)log_ctx;
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
bool is_gamut_remap_available = false;
@@ -813,6 +816,7 @@ void dcn10_log_hw_state(struct dc *dc,
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ (void)dc;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -1181,6 +1185,7 @@ enum dc_status dcn10_enable_stream_timing(
struct dc_state *context,
struct dc *dc)
{
+ (void)context;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
@@ -1284,6 +1289,7 @@ static void dcn10_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
int i;
struct dc_link *link;
DC_LOGGER_INIT(dc->ctx);
@@ -1551,6 +1557,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
+ (void)state;
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx);
@@ -1904,7 +1911,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- int edp_num;
+ unsigned int edp_num;
int i = 0;
dc_get_edp_links(dc, edp_links, &edp_num);
@@ -2004,6 +2011,7 @@ static bool patch_address_for_sbs_tb_stereo(
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ (void)dc;
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -2030,6 +2038,7 @@ void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
+ (void)dc;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
@@ -2472,6 +2481,7 @@ void dcn10_enable_vblanks_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
+ (void)group_index;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0, master;
@@ -2537,6 +2547,7 @@ void dcn10_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
+ (void)group_index;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0;
@@ -2641,6 +2652,7 @@ static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
struct vm_system_aperture_param *apt,
struct dce_hwseq *hws)
{
+ (void)hubp1;
PHYSICAL_ADDRESS_LOC physical_page_number;
uint32_t logical_addr_low;
uint32_t logical_addr_high;
@@ -2666,6 +2678,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
struct vm_context0_param *vm0,
struct dce_hwseq *hws)
{
+ (void)hubp1;
PHYSICAL_ADDRESS_LOC fb_base;
PHYSICAL_ADDRESS_LOC fb_offset;
uint32_t fb_base_value;
@@ -2724,6 +2737,7 @@ static void dcn10_enable_plane(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
struct dce_hwseq *hws = dc->hwseq;
if (dc->debug.sanity_checks) {
@@ -2821,6 +2835,8 @@ void dcn10_program_output_csc(struct dc *dc,
uint16_t *matrix,
int opp_id)
{
+ (void)dc;
+ (void)opp_id;
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
@@ -3586,6 +3602,10 @@ bool dcn10_dummy_display_power_gating(
struct dc_bios *dcb,
enum pipe_gating_control power_gating)
{
+ (void)dc;
+ (void)controller_id;
+ (void)dcb;
+ (void)power_gating;
return true;
}
@@ -4052,6 +4072,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
uint32_t clk_khz,
uint32_t stepping)
{
+ (void)stepping;
struct dc_state *context = dc->current_state;
struct dc_clock_config clock_cfg = {0};
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 307e8f8060e6..288e4edaa9a2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -75,6 +75,7 @@
void dcn20_log_color_state(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
+ (void)log_ctx;
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
bool is_gamut_remap_available = false;
@@ -379,6 +380,7 @@ void dcn20_program_triple_buffer(
struct pipe_ctx *pipe_ctx,
bool enable_triple_buffer)
{
+ (void)dc;
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
@@ -893,7 +895,7 @@ enum dc_status dcn20_enable_stream_timing(
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
- if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ if (dc_is_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
@@ -1175,6 +1177,8 @@ bool dcn20_set_input_transfer_func(struct dc *dc,
void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ (void)context;
+ (void)dc;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
@@ -1297,6 +1301,7 @@ static void dcn20_power_on_plane_resources(
void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
//}
@@ -2652,6 +2657,7 @@ void dcn20_init_vm_ctx(
struct dc_virtual_addr_space_config *va_config,
int vmid)
{
+ (void)hws;
struct dcn_hubbub_virt_addr_config config;
if (vmid == 0) {
@@ -2670,6 +2676,7 @@ void dcn20_init_vm_ctx(
int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
+ (void)hws;
struct dcn_hubbub_phys_addr_config config;
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
@@ -2799,6 +2806,7 @@ void dcn20_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
struct dccg *dccg = dc->res_pool->dccg;
@@ -2856,7 +2864,7 @@ void dcn20_reset_back_end_for_pipe(
* the case where the same symclk is shared across multiple otg
* instances
*/
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ if (dc_is_tmds_signal(pipe_ctx->stream->signal))
link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
@@ -3245,6 +3253,7 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset)
{
+ (void)dc;
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index d04cfd403b7e..2aa0f1de8103 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -75,6 +75,7 @@
void dcn30_log_color_state(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
+ (void)log_ctx;
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
bool is_gamut_remap_available = false;
@@ -645,7 +646,7 @@ void dcn30_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
int i;
- int edp_num;
+ unsigned int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
@@ -1183,6 +1184,7 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset)
{
+ (void)dc;
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
}
@@ -1237,6 +1239,7 @@ void dcn30_get_underflow_debug_data(const struct dc *dc,
struct timing_generator *tg,
struct dc_underflow_debug_data *out_data)
{
+ (void)tg;
struct hubbub *hubbub = dc->res_pool->hubbub;
if (hubbub) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c
index 3bc56ac346f3..6e1877a8682d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c
@@ -45,20 +45,31 @@
void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on)
{
+ (void)dpp_inst;
+ (void)hws;
+ (void)power_on;
/*DCN303 removes PG registers*/
}
void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
+ (void)hubp_inst;
+ (void)hws;
+ (void)power_on;
/*DCN303 removes PG registers*/
}
void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on)
{
+ (void)dsc_inst;
+ (void)hws;
+ (void)power_on;
/*DCN303 removes PG registers*/
}
void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
+ (void)enable;
+ (void)hws;
/*DCN303 removes PG registers*/
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 94f63fd54e3e..b4afb2bc4493 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -484,6 +484,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
+ (void)hws;
struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
@@ -511,6 +512,7 @@ static void dcn31_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
struct dc_link *link;
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -548,7 +550,7 @@ static void dcn31_reset_back_end_for_pipe(
* the case where the same symclk is shared across multiple otg
* instances
*/
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ if (dc_is_tmds_signal(pipe_ctx->stream->signal))
link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->top_pipe == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3e239124c17d..858a06b03b57 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -172,6 +172,7 @@ static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_
void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ (void)context;
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index e5d93dd348dd..b45ceb570a5c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -570,6 +570,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
+ (void)dc;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
const struct pwl_params *params = NULL;
@@ -793,7 +794,7 @@ void dcn32_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
int i;
- int edp_num;
+ unsigned int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
@@ -1143,6 +1144,7 @@ static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_
void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ (void)context;
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b5f60f59382e..f133b52ea958 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -428,6 +428,7 @@ static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_
void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ (void)context;
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
@@ -520,7 +521,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- int edp_num;
+ unsigned int edp_num;
int i = 0;
dc_get_edp_links(dc, edp_links, &edp_num);
@@ -816,6 +817,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
/* enable DCFCLK current DCHUB */
@@ -921,7 +923,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
bool hpo_frl_stream_enc_acquired = false;
bool hpo_dp_stream_enc_acquired = false;
int i = 0, j = 0;
- int edp_num = 0;
+ unsigned int edp_num = 0;
struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
memset(update_state, 0, sizeof(struct pg_block_update));
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index a72284c3fa1c..7e6bdefb5471 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -140,9 +140,10 @@ void dcn401_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
int i;
- int edp_num;
+ unsigned int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
+ bool dchub_ref_freq_changed;
int current_dchub_ref_freq = 0;
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
@@ -357,26 +358,29 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+
+ /* sw and fw FAMS versions must match for support */
dc->debug.fams2_config.bits.enable &=
- dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
- if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
- || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
+ dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver;
+ dchub_ref_freq_changed =
+ res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq;
+ if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) &&
+ dc->res_pool->funcs->update_bw_bounding_box &&
+ dc->clk_mgr && dc->clk_mgr->bw_params) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
if (dc->clk_mgr)
- dc->res_pool->funcs->update_bw_bounding_box(dc,
- dc->clk_mgr->bw_params);
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
}
}
-void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx)
+void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
- struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
- primary_dpp_pipe_ctx->plane_res.hubp : NULL;
+ (void)dc;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
- if (primary_hubp && primary_hubp->funcs->hubp_enable_3dlut_fl) {
- primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
}
}
@@ -384,11 +388,8 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
- const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
- primary_dpp_pipe_ctx->plane_res.hubp : NULL;
const struct dc_plane_cm *cm = &plane_state->cm;
int mpcc_id = hubp->inst;
struct mpc *mpc = dc->res_pool->mpc;
@@ -486,41 +487,25 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);
if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, primary_hubp->inst);
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
/* HUBP */
- if (primary_hubp->inst == hubp->inst) {
- /* only program if this is the primary dpp pipe for the given plane */
- if (hubp->funcs->hubp_program_3dlut_fl_config)
- hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
- if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
+ if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
- if (hubp->funcs->hubp_enable_3dlut_fl) {
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- } else {
- /* GPU memory only supports fast load path */
- BREAK_TO_DEBUGGER();
- lut_enable = false;
- result = false;
- }
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
} else {
- /* re-trigger priamry HUBP to load 3DLUT */
- if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
- primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
- }
-
- /* clear FL setup on this pipe's HUBP */
- memset(&lut3d_dma, 0, sizeof(lut3d_dma));
- if (hubp->funcs->hubp_program_3dlut_fl_config)
- hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);
-
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+ /* GPU memory only supports fast load path */
+ BREAK_TO_DEBUGGER();
+ lut_enable = false;
+ result = false;
}
} else {
/* Legacy (Host) Load Mode */
@@ -572,6 +557,7 @@ bool dcn401_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
+ (void)dc;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
const struct pwl_params *params = NULL;
@@ -633,6 +619,7 @@ static void enable_stream_timing_calc(
struct drr_params *params,
unsigned int *event_triggers)
{
+ (void)dc;
struct dc_stream_state *stream = pipe_ctx->stream;
int i;
@@ -1410,6 +1397,7 @@ void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
+ (void)context;
/* use always for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
@@ -1830,41 +1818,42 @@ void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
* This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
* of whether OTG lock is currently being held or not.
*/
- const struct pipe_ctx *otg_master_pipe_ctx = resource_get_otg_master(pipe_ctx);
- struct timing_generator *tg = otg_master_pipe_ctx ?
- otg_master_pipe_ctx->stream_res.tg : NULL;
- const struct pipe_ctx *primary_dpp_pipe_ctx = resource_is_pipe_type(pipe_ctx, DPP_PIPE) ?
- resource_get_primary_dpp_pipe(pipe_ctx) : pipe_ctx;
- struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
- primary_dpp_pipe_ctx->plane_res.hubp : NULL;
-
- if (!otg_master_pipe_ctx && !tg) {
- return;
+ struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
+ struct pipe_ctx *odm_pipe, *mpc_pipe;
+ int i, wa_pipe_ct = 0;
+
+ for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
+ for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
+ if (mpc_pipe->plane_state &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
+ wa_pipes[wa_pipe_ct++] = mpc_pipe;
+ }
+ }
}
- if (primary_dpp_pipe_ctx &&
- primary_dpp_pipe_ctx->plane_state &&
- primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_enable &&
- primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_dma_enable) {
- if (tg->funcs->set_vupdate_keepout)
- tg->funcs->set_vupdate_keepout(tg, true);
+ if (wa_pipe_ct > 0) {
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
- if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
- primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
}
- tg->funcs->unlock(tg);
- if (tg->funcs->wait_update_lock_status)
- tg->funcs->wait_update_lock_status(tg, false);
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
+ pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
- if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
- primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
}
- if (tg->funcs->set_vupdate_keepout)
- tg->funcs->set_vupdate_keepout(tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
} else {
- tg->funcs->unlock(tg);
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
}
}
@@ -1883,6 +1872,7 @@ void dcn401_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ (void)context;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -1938,7 +1928,7 @@ void dcn401_reset_back_end_for_pipe(
* the case where the same symclk is shared across multiple otg
* instances
*/
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ if (dc_is_tmds_signal(pipe_ctx->stream->signal))
link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
@@ -3258,6 +3248,7 @@ void dcn401_update_writeback_sequence(
struct dc_state *context,
struct block_sequence_state *seq_state)
{
+ (void)context;
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -3463,6 +3454,7 @@ void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct block_sequence_state *seq_state)
{
+ (void)context;
struct dce_hwseq *hws = dc->hwseq;
uint32_t org_ip_request_cntl = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index b9a03ffa2717..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -41,7 +41,8 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
bool dcn401_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream);
-void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx);
+void dcn401_trigger_3dlut_dma_load(struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
unsigned int *tmds_div);
enum dc_status dcn401_enable_stream_timing(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
index 7f9c121c00e6..46f2f9833d9e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
@@ -66,7 +66,7 @@ void dcn42_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
int i;
- int edp_num;
+ unsigned int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bool dchub_ref_freq_changed;
@@ -386,6 +386,7 @@ void dcn42_program_cm_hist(
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
+ (void)dc;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
if (dpp && dpp->funcs->dpp_cm_hist_control)
@@ -1000,6 +1001,7 @@ void dcn42_root_clock_control(struct dc *dc,
}
void dcn42_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
+ (void)dc;
struct crtc_stereo_flags flags = { 0 };
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -1063,7 +1065,7 @@ void dcn42_power_down_on_boot(struct dc *dc)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- int edp_num;
+ unsigned int edp_num;
int i = 0;
dc_get_edp_links(dc, edp_links, &edp_num);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 98abe0d2d30f..d1dba7ffcd9b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -1120,7 +1120,7 @@ struct hw_sequencer_funcs {
void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix, int opp_id);
- void (*trigger_3dlut_dma_load)(struct pipe_ctx *pipe_ctx);
+ void (*trigger_3dlut_dma_load)(struct dc *dc, struct pipe_ctx *pipe_ctx);
/* VM Related */
int (*init_sys_ctx)(struct dce_hwseq *hws,
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 676df39079fc..015f3659cf77 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -92,7 +92,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
.enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
.enable_value = {\
DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
- ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
+ (uint32_t)~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
},\
.ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
@@ -107,7 +107,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
.enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
.enable_value = {\
DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
- ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
+ (uint32_t)~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
.ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
.ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
@@ -121,7 +121,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
.enable_value = {\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
- ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ (uint32_t)~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
.ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
@@ -136,7 +136,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
.enable_value = {\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
- ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ (uint32_t)~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
.ack_mask =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
@@ -152,7 +152,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
.enable_value = {\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
- ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ (uint32_t)~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.ack_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
@@ -183,6 +183,7 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
+ (void)enable;
DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
__func__, info->src_id, info->ext_id);
@@ -328,6 +329,7 @@ enum dc_irq_source to_dal_irq_source_dce110(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
switch (src_id) {
case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK1;
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index b473dae2abbb..dbab6e3737a1 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -79,7 +79,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index b5c5f42cf8f2..3e19dfdd0474 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -68,7 +68,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
- ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
+ (uint32_t)~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
},\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
@@ -83,7 +83,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
- ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
+ (uint32_t)~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
.ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
@@ -98,7 +98,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
.enable_value = {\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
- ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ (uint32_t)~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
.ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
@@ -113,7 +113,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
.enable_value = {\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
- ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ (uint32_t)~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
.ack_mask =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
@@ -129,7 +129,7 @@ static struct irq_source_info_funcs vupdate_irq_info_funcs = {
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
.enable_value = {\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
- ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ (uint32_t)~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.ack_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index ca2e13702fbb..0bdb62b883aa 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -42,6 +42,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -176,7 +179,7 @@ static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 1c4c51abc259..8a0f4b5d6956 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -43,6 +43,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -179,7 +182,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 9e0881472e38..9d13c0cc91f0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -42,6 +42,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -189,7 +192,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index 92bcd35723ca..78338af86666 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -50,6 +50,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -196,7 +199,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 16685d066c1a..2f47a9fbcd43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -37,6 +37,9 @@
static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id)
{
+ (void)ext_id;
+ (void)irq_service;
+ (void)src_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -180,7 +183,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
@@ -199,7 +202,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 01d83e1922d6..236a7278a8cf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -38,6 +38,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -123,7 +126,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 2114c5669e6e..213e5da31b19 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -40,6 +40,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -184,7 +187,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -198,7 +201,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index 16f158e0fb60..8aacc229b002 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -42,6 +42,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -186,7 +189,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -200,7 +203,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 8ee03c006ad6..b3bddc87afed 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -47,6 +47,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -191,7 +194,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -205,7 +208,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index 07e6f7dd6b99..f407ba72acdb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -41,6 +41,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -195,7 +198,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -209,7 +212,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index 3d28a5007f53..2f2985075f88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -39,6 +39,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -184,7 +187,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base + reg_num].enable_value[0] = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base + reg_num].enable_value[1] = \
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
REG_STRUCT[base + reg_num].ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
@@ -198,7 +201,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base].enable_value[0] = \
reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base].enable_value[1] = \
- ~reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
REG_STRUCT[base].ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index f716c2590876..1ed75b53e131 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -18,6 +18,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -163,7 +166,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base + reg_num].enable_value[0] = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base + reg_num].enable_value[1] = \
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
REG_STRUCT[base + reg_num].ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
@@ -177,7 +180,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base].enable_value[0] = \
reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base].enable_value[1] = \
- ~reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
REG_STRUCT[base].ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
index e718004901cf..4c321c26f02f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -17,6 +17,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -162,7 +165,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base + reg_num].enable_value[0] = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base + reg_num].enable_value[1] = \
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
REG_STRUCT[base + reg_num].ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
@@ -176,7 +179,7 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
REG_STRUCT[base].enable_value[0] = \
reg1 ## __ ## mask1 ## _MASK,\
REG_STRUCT[base].enable_value[1] = \
- ~reg1 ## __ ## mask1 ## _MASK, \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \
REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
REG_STRUCT[base].ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index 2cde50b2ae22..059c5c636fd9 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -20,6 +20,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -175,7 +178,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -189,7 +192,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c b/drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c
index 19e0741c62cd..f4d1ce9079de 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c
@@ -19,6 +19,9 @@ static enum dc_irq_source to_dal_irq_source_dcn42(
uint32_t src_id,
uint32_t ext_id)
{
+ (void)irq_service;
+ (void)src_id;
+ (void)ext_id;
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
@@ -173,7 +176,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
- ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
@@ -187,7 +190,7 @@ static struct irq_source_info_funcs vline2_irq_info_funcs = {
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
- ~reg1 ## __ ## mask1 ## _MASK \
+ (uint32_t)~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 693d852b1c40..060460abc377 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -66,6 +66,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
+ (void)skip_video_pattern;
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
struct dc_stream_update stream_update = { 0 };
@@ -483,6 +484,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
enum dp_test_pattern_color_space test_pattern_color_space)
{
+ (void)test_pattern_color_space;
enum controller_dp_test_pattern controller_test_pattern;
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 81bf3c5e1fdf..2a87b23582f3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -81,6 +81,10 @@ static void set_dio_dpia_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
+ (void)link;
+ (void)link_res;
+ (void)link_settings;
+ (void)lane_settings;
}
static void enable_dpia_link_output(struct dc_link *link,
@@ -145,13 +149,9 @@ static void disable_dpia_link_output(struct dc_link *link,
}
static const struct link_hwss dpia_link_hwss = {
- .setup_stream_encoder = setup_dio_stream_encoder,
- .reset_stream_encoder = reset_dio_stream_encoder,
- .setup_stream_attribute = setup_dio_stream_attribute,
- .disable_link_output = disable_dpia_link_output,
- .setup_audio_output = setup_dio_audio_output,
- .enable_audio_packet = enable_dio_audio_packet,
- .disable_audio_packet = disable_dio_audio_packet,
+ /* Ensure initialization order matches the declaration in link_hwss.h
+ * for strict compiler compliance and consistency across HWSS implementations
+ */
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dpia_link_output,
@@ -159,6 +159,14 @@ static const struct link_hwss dpia_link_hwss = {
.set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
},
+
+ .setup_stream_encoder = setup_dio_stream_encoder,
+ .reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dpia_link_output,
+ .setup_audio_output = setup_dio_audio_output,
+ .enable_audio_packet = enable_dio_audio_packet,
+ .disable_audio_packet = disable_dio_audio_packet,
};
bool can_use_dpia_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index cec68c5dba13..dbbedeeed298 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -110,6 +110,8 @@ void enable_hpo_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
+ (void)signal;
+ (void)clock_source;
if (!link_res->hpo_dp_link_enc) {
DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
return;
@@ -160,6 +162,7 @@ static void set_hpo_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
+ (void)link;
link_res->hpo_dp_link_enc->funcs->set_ffe(
link_res->hpo_dp_link_enc,
link_settings,
@@ -170,6 +173,7 @@ void update_hpo_dp_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
+ (void)link;
link_res->hpo_dp_link_enc->funcs->update_stream_allocation_table(
link_res->hpo_dp_link_enc,
table);
@@ -178,6 +182,7 @@ void update_hpo_dp_stream_allocation_table(struct dc_link *link,
void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output, uint32_t audio_inst)
{
+ (void)audio_output;
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
pipe_ctx->stream_res.hpo_dp_stream_enc,
audio_inst,
@@ -218,6 +223,7 @@ static const struct link_hwss hpo_dp_link_hwss = {
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
+ (void)link;
return link_res->hpo_dp_link_enc != NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 55c5148de800..6d5b7450b205 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,6 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
+ (void)tp_params;
uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c
index 64742c24f7e6..3aa1375cec71 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_virtual.c
@@ -27,20 +27,26 @@
void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
{
+ (void)pipe_ctx;
}
void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
{
+ (void)pipe_ctx;
}
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
+ (void)pipe_ctx;
}
static void virtual_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
+ (void)link;
+ (void)link_res;
+ (void)signal;
}
static const struct link_hwss virtual_link_hwss = {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 7f1761080aba..59851924bfcd 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -781,8 +781,10 @@ static void restore_phy_clocks_for_destructive_link_verification(const struct dc
}
static void verify_link_capability_destructive(struct dc_link *link,
+ struct dc_sink *sink,
enum dc_detect_reason reason)
{
+ (void)sink;
bool should_prepare_phy_clocks =
should_prepare_phy_clocks_for_link_verification(link->dc, reason);
@@ -821,6 +823,7 @@ static void verify_link_capability_non_destructive(struct dc_link *link)
static bool should_verify_link_capability_destructively(struct dc_link *link,
enum dc_detect_reason reason)
{
+ (void)reason;
bool destrictive = false;
struct dc_link_settings max_link_cap;
bool is_link_enc_unavailable = false;
@@ -854,11 +857,11 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
return destrictive;
}
-static void verify_link_capability(struct dc_link *link,
+static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
enum dc_detect_reason reason)
{
if (should_verify_link_capability_destructively(link, reason))
- verify_link_capability_destructive(link, reason);
+ verify_link_capability_destructive(link, sink, reason);
else
verify_link_capability_non_destructive(link);
}
@@ -1452,9 +1455,8 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
- if (is_local_sink_detect_success && link->local_sink) {
- verify_link_capability(link, reason);
- }
+ if (is_local_sink_detect_success && link->local_sink)
+ verify_link_capability(link, link->local_sink, reason);
DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index e12c25896364..b4f46408a000 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -181,8 +181,7 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
/* link can be also enabled by vbios. In this case it is not recorded
* in pipe_ctx. Disable link phy here to make sure it is completely off
*/
- if (dc_is_dp_signal(link->connector_signal))
- dp_disable_link_phy(link, &link_res, link->connector_signal);
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
}
void link_resume(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index acdc162de535..eb791285ed06 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -391,6 +391,7 @@ static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_s
*/
enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
+ (void)dc;
struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
uint8_t link_count = 0;
enum dc_status result = DC_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 08e2b572e0ff..ddff0db4ce70 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -554,6 +554,7 @@ enum link_training_result dp_check_link_loss_status(
struct dc_link *link,
const struct link_training_settings *link_training_setting)
{
+ (void)link_training_setting;
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
union lane_align_status_updated dpcd_lane_status_updated;
@@ -1387,6 +1388,7 @@ bool dp_set_hw_training_pattern(
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
+ (void)offset;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
switch (pattern) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index 11565f187ac7..1a6bfc45927d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -158,6 +158,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
+ (void)link_res;
/* Assumption: assume hardware has transmitted eq pattern */
enum dc_status status = DC_OK;
enum link_training_result result = LINK_TRAINING_SUCCESS;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 603537ffd128..34fa76d97b83 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -172,6 +172,7 @@ static uint8_t dpia_build_set_config_data(
struct dc_link *link,
struct link_training_settings *lt_settings)
{
+ (void)link;
union dpia_set_config_data data;
data.raw = 0;
@@ -290,6 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
struct link_training_settings *lt_settings,
uint32_t hop)
{
+ (void)link_res;
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
enum dc_status status = DC_ERROR_UNEXPECTED;
@@ -457,6 +459,7 @@ static enum link_training_result dpia_training_cr_transparent(
const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
+ (void)link_res;
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
enum dc_status status;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
@@ -585,6 +588,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
struct link_training_settings *lt_settings,
uint32_t hop)
{
+ (void)link_res;
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
uint32_t retries_eq = 0;
@@ -730,6 +734,7 @@ static enum link_training_result dpia_training_eq_transparent(
const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
+ (void)link_res;
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
uint32_t retries_eq = 0;
enum dc_status status;
@@ -991,6 +996,7 @@ enum link_training_result dpia_perform_link_training(
const struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
+ (void)skip_video_pattern;
enum link_training_result result;
struct link_training_settings lt_settings = {0};
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index 584b9295a12a..e4c2aa2bc364 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -180,6 +180,7 @@ static void dpcd_reduce_address_range(
uint8_t * const reduced_data,
const uint32_t reduced_size)
{
+ (void)extended_size;
const uint32_t offset = reduced_address - extended_address;
/*
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 6bfd2c1294e5..4c7bb0522a8c 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -117,6 +117,9 @@ void mpc3_set_out_rate_control(
bool rate_2x_mode,
struct mpc_dwb_flow_control *flow_control)
{
+ (void)enable;
+ (void)rate_2x_mode;
+ (void)flow_control;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
/* Always disable mpc out rate and flow control.
@@ -908,6 +911,7 @@ static void mpc3_set_3dlut_mode(
bool is_lut_size17x17x17,
uint32_t rmu_idx)
{
+ (void)is_color_channel_12bits;
uint32_t lut_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
@@ -1428,7 +1432,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)
}
//no vacant RMU units or invalid parameters acquire_post_bldn_3dlut
- return -1;
+ return (uint32_t)-1;
}
static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index badcef027b84..1f15ada109b6 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -884,6 +884,7 @@ void mpc32_set_3dlut_mode(
bool is_lut_size17x17x17,
uint32_t mpcc_id)
{
+ (void)is_color_channel_12bits;
uint32_t lut_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 45d418636d0c..b49bd155cad4 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
@@ -250,6 +250,7 @@ void opp1_set_dyn_expansion(
enum dc_color_depth color_dpth,
enum signal_type signal)
{
+ (void)color_sp;
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 6f7b0f816f2a..9e66b9b97c63 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -164,6 +164,7 @@ void optc1_program_timing(
const enum signal_type signal,
bool use_vbios)
{
+ (void)use_vbios;
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
uint32_t asic_blank_start;
@@ -855,6 +856,8 @@ void optc1_set_early_control(
struct timing_generator *optc,
uint32_t early_cntl)
{
+ (void)optc;
+ (void)early_cntl;
/* asic design change, do not need this control
* empty for share caller logic
*/
@@ -1249,6 +1252,7 @@ void optc1_get_crtc_scanoutpos(
static void optc1_enable_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
+ (void)timing;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (flags) {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index e7a90a437fff..39ce4d4a61a1 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -181,6 +181,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask;
@@ -261,6 +262,7 @@ static void optc2_align_vblanks(
uint8_t master_clock_divider,
uint8_t slave_clock_divider)
{
+ (void)slave_clock_divider;
/* accessing slave OTG registers */
struct optc *optc1 = DCN10TG_FROM_TG(optc_slave);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index ee4665aa49e9..d72574db1f07 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -218,6 +218,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 893d2aff1f82..5f53f8747812 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -43,6 +43,7 @@
static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
int mem_count_per_opp = (segment_width + 2559) / 2560;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 43ff957288b2..a7cf34937b2f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -50,6 +50,7 @@
static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
int h_active = segment_width * opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 3dcb0d0c931c..60e546b69a05 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -45,6 +45,7 @@
static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
int h_active = segment_width * opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5aafd0eedf66..a880e4a6d165 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -58,6 +58,7 @@
static void optc35_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
int segment_width, int last_segment_width)
{
+ (void)last_segment_width;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
int h_active = segment_width * opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index fdcf8db6be50..caafebe92129 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -632,6 +632,7 @@ static struct link_encoder *dce100_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dce110_link_encoder *enc110 =
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
@@ -849,6 +850,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
+ (void)dc;
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
@@ -866,6 +868,7 @@ enum dc_status dce100_validate_bandwidth(
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
+ (void)validate_mode;
int i;
bool at_least_one_pipe = false;
struct dc_stream_state *stream = NULL;
@@ -926,6 +929,7 @@ enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
if (!dce100_validate_surface_sets(context))
return DC_FAIL_SURFACE_VALIDATE;
@@ -961,6 +965,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool)
enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
+ (void)caps;
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return DC_OK;
@@ -1039,7 +1044,7 @@ static bool dce100_resource_construct(
pool->base.res_cap = &res_cap;
pool->base.funcs = &dce100_res_pool_funcs;
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
bp = ctx->dc_bios;
@@ -1111,7 +1116,7 @@ static bool dce100_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap.num_timing_generator;
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index ab71f645c90e..f83acfe7a15e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -667,6 +667,7 @@ static struct link_encoder *dce110_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dce110_link_encoder *enc110 =
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
@@ -971,6 +972,7 @@ static enum dc_status dce110_validate_bandwidth(
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
+ (void)validate_mode;
bool result = false;
DC_LOG_BANDWIDTH_CALCS(
@@ -1043,6 +1045,7 @@ static enum dc_status dce110_validate_bandwidth(
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
struct dc_caps *caps)
{
+ (void)caps;
if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
return DC_FAIL_SURFACE_VALIDATE;
@@ -1099,6 +1102,7 @@ static enum dc_status dce110_validate_global(
struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
if (!dce110_validate_surface_sets(context))
return DC_FAIL_SURFACE_VALIDATE;
@@ -1130,6 +1134,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
+ (void)cur_ctx;
struct dc_stream_state *stream = opp_head_pipe->stream;
struct dc *dc = stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
@@ -1354,6 +1359,7 @@ static bool dce110_resource_construct(
struct dce110_resource_pool *pool,
struct hw_asic_id asic_id)
{
+ (void)asic_id;
unsigned int i;
struct dc_context *ctx = dc->ctx;
struct dc_bios *bp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index b7051bfd4326..458b14e4cb97 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -628,6 +628,7 @@ static struct link_encoder *dce112_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dce110_link_encoder *enc110 =
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
@@ -852,6 +853,7 @@ static struct clock_source *find_matching_pll(
const struct resource_pool *pool,
const struct dc_stream_state *const stream)
{
+ (void)res_ctx;
switch (stream->link->link_enc->transmitter) {
case TRANSMITTER_UNIPHY_A:
return pool->clock_sources[DCE112_CLK_SRC_PLL0];
@@ -875,6 +877,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
+ (void)dc;
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
@@ -892,6 +895,7 @@ enum dc_status dce112_validate_bandwidth(
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
+ (void)validate_mode;
bool result = false;
DC_LOG_BANDWIDTH_CALCS(
@@ -1037,6 +1041,7 @@ static enum dc_status dce112_validate_global(
struct dc *dc,
struct dc_state *context)
{
+ (void)dc;
if (!dce112_validate_surface_sets(context))
return DC_FAIL_SURFACE_VALIDATE;
@@ -1240,7 +1245,7 @@ static bool dce112_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 7ee70f7b3aa7..56bbf9dc1691 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -712,6 +712,7 @@ static struct link_encoder *dce120_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dce110_link_encoder *enc110 =
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
@@ -1081,7 +1082,7 @@ static bool dce120_resource_construct(
/* TODO: Fill more data from GreenlandAsicCapability.cpp */
pool->base.pipe_count = res_cap.num_timing_generator;
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 89927727a0d9..33be49b3c1b1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -734,6 +734,7 @@ static struct link_encoder *dce80_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dce110_link_encoder *enc110 =
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
@@ -934,7 +935,7 @@ static bool dce80_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap.num_timing_generator;
pool->base.timing_generator_count = res_cap.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
@@ -1137,7 +1138,7 @@ static bool dce81_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap_81.num_timing_generator;
pool->base.timing_generator_count = res_cap_81.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
@@ -1337,7 +1338,7 @@ static bool dce83_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap_83.num_timing_generator;
pool->base.timing_generator_count = res_cap_83.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 44178e915bdc..250c3975b9e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -736,6 +736,7 @@ static struct link_encoder *dcn10_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn10_link_encoder *enc10 =
kzalloc_obj(struct dcn10_link_encoder);
int link_regs_id;
@@ -1049,6 +1050,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
+ (void)dc;
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
@@ -1083,6 +1085,7 @@ static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer(
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
+ (void)cur_ctx;
struct resource_context *res_ctx = &new_ctx->res_ctx;
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
@@ -1346,7 +1349,7 @@ static bool dcn10_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
/* max pipe num for ASIC before check pipe fuses */
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 8426d5f9f377..bd5c18ee35e7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -916,6 +916,7 @@ struct link_encoder *dcn20_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
int link_regs_id;
@@ -1310,6 +1311,7 @@ static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
+ (void)dc;
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
@@ -1537,6 +1539,7 @@ void dcn20_split_stream_for_mpc(
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe)
{
+ (void)res_ctx;
int pipe_idx = secondary_pipe->pipe_idx;
struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
@@ -1682,6 +1685,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
{
+ (void)pool;
struct pipe_ctx *secondary_pipe = NULL;
if (dc && primary_pipe) {
@@ -1814,6 +1818,11 @@ void dcn20_merge_pipes_for_validate(
}
}
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
@@ -1898,8 +1907,15 @@ int dcn20_validate_apply_pipe_split_flags(
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
int pipe_plane = v->pipe_plane[pipe_idx];
- bool split4mpc = context->stream_count == 1 && plane_count == 1
- && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+ bool split4mpc = false;
+
+ if (context->stream_count == 1 && plane_count == 1
+ && dc->config.allow_4to1MPC && dc->res_pool->pipe_count >= 4
+ && !dc->debug.disable_z9_mpc
+ && pipe->plane_state && is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920
+ && pipe->plane_state->src_rect.height <= 1080)
+ split4mpc = true;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -2149,6 +2165,7 @@ struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
const struct resource_pool *pool,
const struct pipe_ctx *opp_head)
{
+ (void)cur_ctx;
struct resource_context *res_ctx = &new_ctx->res_ctx;
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
@@ -2331,6 +2348,7 @@ static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
+ (void)hw_internal_rev;
return DML_PROJECT_NAVI10v2;
}
@@ -2417,7 +2435,7 @@ static bool dcn20_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index a1a529cabb93..bd19168a3f77 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -772,7 +772,8 @@ bool dcn21_fast_validate_bw(struct dc *dc,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- enum dc_validate_mode validate_mode)
+ enum dc_validate_mode validate_mode,
+ bool allow_self_refresh_only)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -803,18 +804,23 @@ bool dcn21_fast_validate_bw(struct dc *dc,
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel > context->bw_ctx.dml.soc.num_states) {
- /*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
- *
- * We don't actually support prefetch mode 2, so require that we
- * at least support prefetch mode 1.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (vlevel > context->bw_ctx.dml.soc.num_states)
+
+ if (allow_self_refresh_only) {
+ /*
+ * If mode is unsupported or there's still no p-state support then
+ * fall back to favoring voltage.
+ *
+ * We don't actually support prefetch mode 2, so require that we
+ * at least support prefetch mode 1.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+ } else {
goto validate_fail;
+ }
}
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
@@ -1293,6 +1299,7 @@ static struct link_encoder *dcn21_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn21_link_encoder *enc21 =
kzalloc_obj(struct dcn21_link_encoder);
int link_regs_id;
@@ -1402,7 +1409,7 @@ static bool dcn21_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
/* max pipe num for ASIC before check pipe fuses */
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index a017fd9854d1..23d3a36872bb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
@@ -51,6 +51,7 @@ bool dcn21_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- enum dc_validate_mode validate_mode);
+ enum dc_validate_mode validate_mode,
+ bool allow_self_refresh_only);
#endif /* _DCN21_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 566517b99a09..5742effef7ae 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -924,6 +924,7 @@ static struct link_encoder *dcn30_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1641,6 +1642,8 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ dcn20_merge_pipes_for_validate(dc, context);
+
context->bw_ctx.dml.vba.maxMpcComb = 0;
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
@@ -2295,7 +2298,7 @@ static bool dcn30_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index d21b928055e5..9773896e0801 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -880,6 +880,7 @@ static struct link_encoder *dcn301_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1428,7 +1429,7 @@ static bool dcn301_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index d24b9b81df77..d9f12a6f225f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -894,6 +894,7 @@ static struct link_encoder *dcn302_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder);
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -1218,7 +1219,7 @@ static bool dcn302_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->pipe_count = pool->res_cap->num_timing_generator;
pool->mpcc_count = pool->res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 0b44a33a0d32..f0c75db81b2c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -839,6 +839,7 @@ static struct link_encoder *dcn303_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder);
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -1159,7 +1160,7 @@ static bool dcn303_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->pipe_count = pool->res_cap->num_timing_generator;
pool->mpcc_count = pool->res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 6ce6b2b1f288..afcc4dff6abc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1090,6 +1090,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1699,12 +1700,9 @@ int dcn31_populate_dml_pipes_from_context(
pipe_cnt++;
}
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE;
- dc->config.enable_4to1MPC = false;
+
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
@@ -1897,7 +1895,7 @@ static bool dcn31_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
@@ -1922,6 +1920,9 @@ static bool dcn31_resource_construct(
dc->caps.is_apu = true;
dc->caps.zstate_support = true;
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
+
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
@@ -1963,8 +1964,6 @@ static bool dcn31_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
- dc->config.no_native422_support = true;
-
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2245,6 +2244,7 @@ enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
struct pipe_ctx *pipes,
struct audio_output *audio_output)
{
+ (void)link_setting;
struct dc_state *state = link->dc->current_state;
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 66bf7725aeaf..654b4e97807e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1148,6 +1148,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1827,9 +1828,12 @@ static bool dcn314_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 0e0c52128c55..f424fd4d5a45 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1089,6 +1089,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1785,11 +1786,9 @@ static int dcn315_populate_dml_pipes_from_context(
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
- dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format)
@@ -1868,8 +1867,11 @@ static bool dcn315_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
@@ -1925,8 +1927,6 @@ static bool dcn315_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
- dc->config.no_native422_support = true;
-
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 63675b53674a..e0dc8aaaaaa1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1082,6 +1082,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
+ (void)ctx;
struct dcn20_link_encoder *enc20 =
kzalloc_obj(struct dcn20_link_encoder);
@@ -1669,11 +1670,9 @@ static int dcn316_populate_dml_pipes_from_context(
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE;
ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE);
- dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format)) {
@@ -1743,9 +1742,13 @@ static bool dcn316_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
+
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 2b9d8d224572..3c0d046ab747 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2191,7 +2191,7 @@ static bool dcn32_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.timing_generator_count = num_pipes;
pool->base.pipe_count = num_pipes;
pool->base.mpcc_count = num_pipes;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
index f5a4e97c40ce..4808c793590f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
@@ -41,6 +41,7 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct pipe_ctx *pipe_ctx,
bool ignore_cursor_buf)
{
+ (void)dc;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
uint32_t cursor_mall_size_bytes = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index e3dc4b1aacda..b8ae6e8397ef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1695,7 +1695,7 @@ static bool dcn321_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.timing_generator_count = num_pipes;
pool->base.pipe_count = num_pipes;
pool->base.mpcc_count = num_pipes;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ff557c4d594e..825ecaf9c580 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1738,9 +1738,11 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
{
bool out = false;
+ DC_FP_START();
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
validate_mode);
+ DC_FP_END();
if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1774,6 +1776,12 @@ static int populate_dml_pipes_from_context_fpu(struct dc *dc,
return ret;
}
+void dcn35_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ DC_FP_START();
+ dcn35_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
+}
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1795,7 +1803,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
+ .update_bw_bounding_box = dcn35_update_bw_bounding_box,
.patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
@@ -1827,6 +1835,9 @@ static bool dcn35_resource_construct(
clk_src_regs_init(3, D),
clk_src_regs_init(4, E);
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
+
#undef REG_STRUCT
#define REG_STRUCT abm_regs
abm_regs_init(0),
@@ -1847,7 +1858,7 @@ static bool dcn35_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index 9c56ae76e0c7..6c2c61c711b9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -312,4 +312,5 @@ struct resource_pool *dcn35_create_resource_pool(
#define DPP_REG_LIST_DCN35_RI(id)\
DPP_REG_LIST_DCN30_COMMON_RI(id)
+void dcn35_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
#endif /* _DCN35_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0c39d0b17947..00286cba5742 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1718,9 +1718,11 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
{
bool out = false;
+ DC_FP_START();
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
validate_mode);
+ DC_FP_END();
if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1747,6 +1749,12 @@ static int populate_dml_pipes_from_context_fpu(struct dc *dc,
}
+static void dcn351_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ DC_FP_START();
+ dcn351_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
+}
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1768,7 +1776,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
+ .update_bw_bounding_box = dcn351_update_bw_bounding_box,
.patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
@@ -1800,6 +1808,9 @@ static bool dcn351_resource_construct(
clk_src_regs_init(3, D),
clk_src_regs_init(4, E);
+ /* Enable 4to1MPC by default */
+ dc->config.allow_4to1MPC = true;
+
#undef REG_STRUCT
#define REG_STRUCT abm_regs
abm_regs_init(0),
@@ -1820,7 +1831,7 @@ static bool dcn351_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 1ad44fb64213..7c4519d57e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1725,9 +1725,11 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
{
bool out = false;
+ DC_FP_START();
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
validate_mode);
+ DC_FP_END();
if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1775,7 +1777,7 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
+ .update_bw_bounding_box = dcn35_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
@@ -1826,7 +1828,7 @@ static bool dcn36_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 491860cc8378..cb93bfbe9e9e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1643,8 +1643,10 @@ static struct dc_cap_funcs cap_funcs = {
.get_subvp_en = dcn32_subvp_in_use,
};
-static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+static void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
+ dc_assert_fp_enabled();
+
/* re-calculate the available MALL size if required */
if (bw_params->num_channels > 0) {
dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
@@ -1653,17 +1655,19 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
}
- DC_FP_START();
-
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
+}
+static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ DC_FP_START();
+ dcn401_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
-
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
plane_state->tiling_info.gfxversion = DcGfxAddr3;
@@ -1688,10 +1692,13 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
}
}
- if (dc->debug.using_dml2)
+ if (dc->debug.using_dml2) {
+ DC_FP_START();
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ DC_FP_END();
+ }
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
/* check new stream configuration still supports cursor if subvp used */
@@ -1710,10 +1717,13 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
/* attempt to validate again with subvp disabled due to cursor */
- if (dc->debug.using_dml2)
+ if (dc->debug.using_dml2) {
+ DC_FP_START();
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ DC_FP_END();
+ }
}
return status;
@@ -1722,9 +1732,13 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
void dcn401_prepare_mcache_programming(struct dc *dc,
struct dc_state *context)
{
- if (dc->debug.using_dml21)
+ if (dc->debug.using_dml21) {
+ DC_FP_START();
dml2_prepare_mcache_programming(dc, context,
- context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2);
+ context->power_source == DC_POWER_SOURCE_DC ?
+ context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2);
+ DC_FP_END();
+ }
}
static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
@@ -1915,7 +1929,7 @@ static bool dcn401_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.timing_generator_count = num_pipes;
pool->base.pipe_count = num_pipes;
pool->base.mpcc_count = num_pipes;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
index 6328b3dc35f9..c0d37f00fed9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
@@ -761,6 +761,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.ignore_pg = true,
.disable_stutter_for_wm_program = true,
.min_deep_sleep_dcfclk_khz = 8000,
+ .replay_skip_crtc_disabled = true,
+ .psr_skip_crtc_disable = true,
};
static const struct dc_check_config config_defaults = {
@@ -1694,37 +1696,51 @@ static void dcn42_destroy_resource_pool(struct resource_pool **pool)
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap};
-static void dcn42_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+static void dcn42_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
- DC_FP_START();
+ (void)bw_params;
+ dc_assert_fp_enabled();
+
if (dc->current_state && dc->current_state->bw_ctx.dml2)
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- DC_FP_END();
}
+static void dcn42_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ DC_FP_START();
+ dcn42_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
+}
enum dc_status dcn42_validate_bandwidth(struct dc *dc,
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
bool out = false;
+ DC_FP_START();
+
out = dml2_validate(dc, context, context->bw_ctx.dml2,
validate_mode);
- DC_FP_START();
+
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
/*not required for mode enumeration*/
dcn42_decide_zstate_support(dc, context);
}
+
DC_FP_END();
+
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn42_prepare_mcache_programming(struct dc *dc,
struct dc_state *context)
{
- if (dc->debug.using_dml21)
+ if (dc->debug.using_dml21) {
+ DC_FP_START();
dml2_prepare_mcache_programming(dc, context,
context->power_source == DC_POWER_SOURCE_DC ?
- context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2);
+ context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2);
+ DC_FP_END();
+ }
}
/* Create a minimal link encoder object not associated with a particular
* physical connector.
@@ -1759,6 +1775,8 @@ static unsigned int dcn42_get_max_hw_cursor_size(const struct dc *dc,
struct dc_state *state,
const struct dc_stream_state *stream)
{
+ (void)state;
+ (void)stream;
return dc->caps.max_cursor_size;
}
static struct resource_funcs dcn42_res_pool_funcs = {
@@ -1783,7 +1801,7 @@ static struct resource_funcs dcn42_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn42_update_bw_bounding_box,
- .patch_unknown_plane_state = dcn401_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn42_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn42_get_preferred_eng_id_dpia,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
@@ -1864,7 +1882,7 @@ static bool dcn42_resource_construct(
/*************************************************
* Resource + asic cap harcoding *
*************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool->base.pipe_count = num_pipes;
pool->base.mpcc_count = num_pipes;
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
index 1c04171b296c..146a6e47934b 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -159,6 +159,7 @@ static void dcn42_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc
static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
{
+ (void)config;
/* Individual modification can be overwritten even if it was obtained by a previous function.
* Modifications are acquired in order of priority (lowest to highest).
*/
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 8fbd179a4c87..fe9431cea3e5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -6272,6 +6272,7 @@ struct dmub_cmd_edid_cea_amd_vsdb {
uint16_t amd_vsdb_version; /**< AMD VSDB version */
uint16_t min_frame_rate; /**< Maximum frame rate */
uint16_t max_frame_rate; /**< Minimum frame rate */
+ uint8_t freesync_mccs_vcp_code; /**< Freesync MCCS VCP code */
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 54df2147e4dc..0b152926f75b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -194,6 +194,7 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)region6;
union dmub_addr offset;
uint64_t fb_base, fb_offset;
@@ -396,6 +397,7 @@ union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub)
void dmub_dcn20_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
+ (void)params;
union dmub_fw_boot_options boot_options = {0};
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
@@ -460,20 +462,26 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
+ ASSERT(is_sec_reset <= 0xFF);
+ dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
+ ASSERT(is_cw0_enabled <= 0xFF);
+ dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index 84a6eb3f677d..23a33db07edc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -127,6 +127,7 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)region6;
union dmub_addr offset;
/* sienna_cichlid has hardwired virtual addressing for CW2-CW7 */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index a0cefc03b21d..478d79a6e246 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -195,6 +195,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)cw2;
+ (void)region6;
union dmub_addr offset;
offset = cw3->offset;
@@ -466,25 +468,32 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
- dmub->debug.is_pwait = is_pwait;
+ ASSERT(is_pwait <= 0xFF);
+ dmub->debug.is_pwait = (uint8_t)is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
+ ASSERT(is_sec_reset <= 0xFF);
+ dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
+ ASSERT(is_cw0_enabled <= 0xFF);
+ dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 2f99a2772599..3f9fb9e05b79 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -237,6 +237,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)cw2;
+ (void)region6;
union dmub_addr offset;
offset = cw3->offset;
@@ -486,19 +488,24 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
- dmub->debug.is_pwait = is_pwait;
+ ASSERT(is_pwait <= 0xFF);
+ dmub->debug.is_pwait = (uint8_t)is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 639f9835e5e9..69fb6084232e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -222,6 +222,7 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)cw2;
union dmub_addr offset;
offset = cw3->offset;
@@ -402,7 +403,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
union dmub_fw_boot_options boot_options = {0};
if (!dmub->dpia_supported) {
- dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia != 0;
}
boot_options.bits.z10_disable = params->disable_z10;
@@ -508,19 +509,24 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
- dmub->debug.is_pwait = is_pwait;
+ ASSERT(is_pwait <= 0xFF);
+ dmub->debug.is_pwait = (uint8_t)is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 16ed07f0e96d..e5a78df80d72 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -213,6 +213,7 @@ void dmub_dcn401_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)cw2;
union dmub_addr offset;
offset = cw3->offset;
@@ -473,25 +474,32 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
- dmub->debug.is_pwait = is_pwait;
+ ASSERT(is_pwait <= 0xFF);
+ dmub->debug.is_pwait = (uint8_t)is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
+ ASSERT(is_sec_reset <= 0xFF);
+ dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
+ ASSERT(is_cw0_enabled <= 0xFF);
+ dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
index 7833a4fb7fbf..a09aa19ad379 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
@@ -41,7 +41,7 @@ void dmub_dcn42_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
union dmub_fw_boot_options boot_options = {0};
if (!dmub->dpia_supported) {
- dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia;
+ dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia != 0;
}
boot_options.bits.z10_disable = params->disable_z10;
@@ -229,6 +229,7 @@ void dmub_dcn42_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
+ (void)cw2;
union dmub_addr offset;
offset = cw3->offset;
@@ -321,11 +322,9 @@ void dmub_dcn42_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
bool dmub_dcn42_is_supported(struct dmub_srv *dmub)
{
- uint32_t supported = 0;
-
- REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);
-
- return supported;
+ // DCN without DMUB is not a supported configuration; safe to assume that it is always
+ // present.
+ return true;
}
union dmub_fw_boot_options dmub_dcn42_get_fw_boot_option(struct dmub_srv *dmub)
@@ -678,25 +677,32 @@ void dmub_dcn42_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ ASSERT(is_dmub_enabled <= 0xFF);
+ dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
- dmub->debug.is_pwait = is_pwait;
+ ASSERT(is_pwait <= 0xFF);
+ dmub->debug.is_pwait = (uint8_t)is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- dmub->debug.is_dmcub_soft_reset = is_soft_reset;
+ ASSERT(is_soft_reset <= 0xFF);
+ dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
+ ASSERT(is_sec_reset <= 0xFF);
+ dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- dmub->debug.is_traceport_en = is_traceport_enabled;
+ ASSERT(is_traceport_enabled <= 0xFF);
+ dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
+ ASSERT(is_cw0_enabled <= 0xFF);
+ dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- dmub->debug.is_cw6_enabled = is_cw6_enabled;
+ ASSERT(is_cw6_enabled <= 0xFF);
+ dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled;
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h
index a49d88ab0455..c53f7691d1a8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h
@@ -34,7 +34,101 @@ struct dmub_srv;
/* DCN42 register definitions. */
#define DMUB_DCN42_REGS() \
- DMUB_DCN35_REGS() \
+ DMUB_SR(DMCUB_CNTL) \
+ DMUB_SR(DMCUB_CNTL2) \
+ DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX0_SIZE) \
+ DMUB_SR(DMCUB_INBOX0_RPTR) \
+ DMUB_SR(DMCUB_INBOX0_WPTR) \
+ DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_INBOX1_SIZE) \
+ DMUB_SR(DMCUB_INBOX1_RPTR) \
+ DMUB_SR(DMCUB_INBOX1_WPTR) \
+ DMUB_SR(DMCUB_OUTBOX0_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_OUTBOX0_SIZE) \
+ DMUB_SR(DMCUB_OUTBOX0_RPTR) \
+ DMUB_SR(DMCUB_OUTBOX0_WPTR) \
+ DMUB_SR(DMCUB_OUTBOX1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_OUTBOX1_SIZE) \
+ DMUB_SR(DMCUB_OUTBOX1_RPTR) \
+ DMUB_SR(DMCUB_OUTBOX1_WPTR) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION4_OFFSET) \
+ DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION5_OFFSET) \
+ DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_SCRATCH0) \
+ DMUB_SR(DMCUB_SCRATCH1) \
+ DMUB_SR(DMCUB_SCRATCH2) \
+ DMUB_SR(DMCUB_SCRATCH3) \
+ DMUB_SR(DMCUB_SCRATCH4) \
+ DMUB_SR(DMCUB_SCRATCH5) \
+ DMUB_SR(DMCUB_SCRATCH6) \
+ DMUB_SR(DMCUB_SCRATCH7) \
+ DMUB_SR(DMCUB_SCRATCH8) \
+ DMUB_SR(DMCUB_SCRATCH9) \
+ DMUB_SR(DMCUB_SCRATCH10) \
+ DMUB_SR(DMCUB_SCRATCH11) \
+ DMUB_SR(DMCUB_SCRATCH12) \
+ DMUB_SR(DMCUB_SCRATCH13) \
+ DMUB_SR(DMCUB_SCRATCH14) \
+ DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(DMCUB_SCRATCH16) \
+ DMUB_SR(DMCUB_SCRATCH17) \
+ DMUB_SR(DMCUB_SCRATCH18) \
+ DMUB_SR(DMCUB_SCRATCH19) \
+ DMUB_SR(DMCUB_SCRATCH20) \
+ DMUB_SR(DMCUB_SCRATCH21) \
+ DMUB_SR(DMCUB_GPINT_DATAIN0) \
+ DMUB_SR(DMCUB_GPINT_DATAIN1) \
+ DMUB_SR(DMCUB_GPINT_DATAOUT) \
+ DMUB_SR(MMHUBBUB_SOFT_RESET) \
+ DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
+ DMUB_SR(DCN_VM_FB_OFFSET) \
+ DMUB_SR(DMCUB_TIMER_CURRENT) \
+ DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \
+ DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \
+ DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR) \
+ DMUB_SR(DMCUB_REGION3_TMR_AXI_SPACE) \
+ DMUB_SR(DMCUB_INTERRUPT_ENABLE) \
+ DMUB_SR(DMCUB_INTERRUPT_ACK) \
+ DMUB_SR(DMU_CLK_CNTL) \
DMUB_SR(DMCUB_INTERRUPT_STATUS) \
DMUB_SR(DMCUB_REG_INBOX0_RDY) \
DMUB_SR(DMCUB_REG_INBOX0_MSG0) \
@@ -59,7 +153,45 @@ struct dmub_srv;
DMUB_SR(HOST_INTERRUPT_CSR)
#define DMUB_DCN42_FIELDS() \
- DMUB_DCN35_FIELDS() \
+ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \
+ DMUB_SF(DMCUB_CNTL2, DMCUB_SOFT_RESET) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
+ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
+ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
+ DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
+ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
+ DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \
+ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) \
+ DMUB_SF(DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE) \
+ DMUB_SF(DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE) \
+ DMUB_SF(DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE) \
DMUB_SF(DMCUB_INTERRUPT_STATUS, DMCUB_REG_OUTBOX0_RSP_INT_STAT) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_STAT) \
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
index ca0c8a54b635..b5566ef8d4f3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -45,6 +45,7 @@ static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask,
uint32_t mask1, uint32_t field_value1,
va_list ap)
{
+ (void)addr;
uint32_t shift, mask, field_value;
int i = 1;
@@ -57,8 +58,9 @@ static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask,
mask = va_arg(ap, uint32_t);
field_value = va_arg(ap, uint32_t);
+ ASSERT(shift <= 0xFF);
set_reg_field_value_masks(field_value_mask, field_value, mask,
- shift);
+ (uint8_t)shift);
i++;
}
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 3bba256a288d..10d23f5f5d94 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -1034,8 +1034,8 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
{
if (dmub->reg_inbox0.is_pending) {
- dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
- !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+ dmub->reg_inbox0.is_pending = (dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub)) != 0;
if (!dmub->reg_inbox0.is_pending) {
/* ack the rsp interrupt */
@@ -1320,7 +1320,7 @@ void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_t
enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
- uint32_t num_pending = 0;
+ uint64_t num_pending = 0;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
@@ -1348,7 +1348,7 @@ enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_c
dmub->reg_inbox0.num_submitted++;
dmub->reg_inbox0.is_pending = true;
- dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending != 0;
return DMUB_STATUS_OK;
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index dbf3ae2f5e13..62b0b1ef0d10 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -33,6 +33,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include <asm/processor.h>
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
@@ -1592,7 +1593,7 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
adev_to_drm(adev)->unique,
- atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+ str_enabled_disabled(atomic_read(&adev->throttling_logging_enabled)),
adev->throttling_logging_rs.interval / HZ + 1);
}
@@ -2235,7 +2236,7 @@ static ssize_t amdgpu_show_npm_status(struct device *dev,
if (r)
return r;
- return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(npower));
}
/**
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 2b5ac21fee39..1d6e30269d56 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -104,6 +104,21 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
PP_GFXOFF_MASK);
hwmgr->pp_table_version = PP_TABLE_V0;
hwmgr->od_enabled = false;
+ switch (hwmgr->chip_id) {
+ case CHIP_BONAIRE:
+ /* R9 M380 in iMac 2015: SMU hangs when enabling MCLK DPM
+ * R7 260X cards with old MC ucode: MCLK DPM is unstable
+ */
+ if (adev->pdev->subsystem_vendor == 0x106B ||
+ adev->pdev->device == 0x6658) {
+ dev_info(adev->dev, "disabling MCLK DPM on quirky ASIC");
+ adev->pm.pp_feature &= ~PP_MCLK_DPM_MASK;
+ hwmgr->feature_mask &= ~PP_MCLK_DPM_MASK;
+ }
+ break;
+ default:
+ break;
+ }
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e38222877f7e..8c37aa452569 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -787,7 +787,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.vddc_dependency_on_mclk;
struct phm_cac_leakage_table *std_voltage_table =
hwmgr->dyn_state.cac_leakage_table;
- uint32_t i;
+ uint32_t i, clk;
PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
@@ -804,10 +804,12 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
data->dpm_table.sclk_table.count = 0;
for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ clk = min(allowed_vdd_sclk_table->entries[i].clk, data->sclk_cap);
+
if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
+ clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
+ clk;
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
@@ -2794,11 +2796,11 @@ static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
if (tmp)
return -EINVAL;
- tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
if (tmp)
return -EINVAL;
- tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_display_clock);
if (tmp)
return -EINVAL;
@@ -2883,8 +2885,8 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->dyn_state.vddc_dependency_on_display_clock);
+ hwmgr->dyn_state.vddc_dependency_on_display_clock = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -2955,6 +2957,70 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
return ret;
}
+static int smu7_init_voltage_dependency_on_display_clock_table(struct pp_hwmgr *hwmgr)
+{
+ struct phm_clock_voltage_dependency_table *table;
+
+ if (!amdgpu_device_ip_get_ip_block(hwmgr->adev, AMD_IP_BLOCK_TYPE_DCE))
+ return 0;
+
+ table = kzalloc(struct_size(table, entries, 4), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ if (hwmgr->chip_id >= CHIP_POLARIS10) {
+ table->entries[0].clk = 38918;
+ table->entries[1].clk = 45900;
+ table->entries[2].clk = 66700;
+ table->entries[3].clk = 113200;
+
+ table->entries[0].v = 700;
+ table->entries[1].v = 740;
+ table->entries[2].v = 800;
+ table->entries[3].v = 900;
+ } else {
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CZ) {
+ table->entries[0].clk = 35200;
+ table->entries[1].clk = 35200;
+ table->entries[2].clk = 46700;
+ table->entries[3].clk = 64300;
+ } else {
+ table->entries[0].clk = 0;
+ table->entries[1].clk = 35200;
+ table->entries[2].clk = 54000;
+ table->entries[3].clk = 62500;
+ }
+
+ table->entries[0].v = 0;
+ table->entries[1].v = 720;
+ table->entries[2].v = 810;
+ table->entries[3].v = 900;
+ }
+
+ table->count = 4;
+ hwmgr->dyn_state.vddc_dependency_on_display_clock = table;
+ return 0;
+}
+
+static void smu7_set_sclk_cap(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->sclk_cap = 0xffffffff;
+
+ if (hwmgr->od_enabled)
+ return;
+
+ /* R9 390X board: last sclk dpm level is unstable, use lower sclk */
+ if (adev->pdev->device == 0x67B0 &&
+ adev->pdev->subsystem_vendor == 0x1043)
+ data->sclk_cap = 104000; /* 1040 MHz */
+
+ if (data->sclk_cap != 0xffffffff)
+ dev_info(adev->dev, "sclk cap: %u kHz on quirky ASIC\n", data->sclk_cap * 10);
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -2966,6 +3032,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return -ENOMEM;
hwmgr->backend = data;
+ smu7_set_sclk_cap(hwmgr);
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
@@ -2983,6 +3050,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_get_elb_voltages(hwmgr);
}
+ result = smu7_init_voltage_dependency_on_display_clock_table(hwmgr);
+ if (result)
+ goto fail;
+
if (hwmgr->pp_table_version == PP_TABLE_V1) {
smu7_complete_dependency_tables(hwmgr);
smu7_set_private_data_based_on_pptable_v1(hwmgr);
@@ -2991,9 +3062,6 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_set_private_data_based_on_pptable_v0(hwmgr);
}
- /* Initalize Dynamic State Adjustment Rule Settings */
- result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
if (result)
goto fail;
@@ -3079,13 +3147,40 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
+static uint32_t smu7_lookup_vddc_from_dispclk(struct pp_hwmgr *hwmgr)
+{
+ const struct amd_pp_display_configuration *cfg = hwmgr->display_config;
+ const struct phm_clock_voltage_dependency_table *vddc_dep_on_dispclk =
+ hwmgr->dyn_state.vddc_dependency_on_display_clock;
+ uint32_t i;
+
+ if (!vddc_dep_on_dispclk || !vddc_dep_on_dispclk->count ||
+ !cfg || !cfg->num_display || !cfg->display_clk)
+ return 0;
+
+ /* Start from 1 because ClocksStateUltraLow should not be used according to DC. */
+ for (i = 1; i < vddc_dep_on_dispclk->count; ++i)
+ if (vddc_dep_on_dispclk->entries[i].clk >= cfg->display_clk)
+ return vddc_dep_on_dispclk->entries[i].v;
+
+ return vddc_dep_on_dispclk->entries[vddc_dep_on_dispclk->count - 1].v;
+}
+
+static void smu7_apply_minimum_dce_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ uint32_t req_vddc = smu7_lookup_vddc_from_dispclk(hwmgr);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VddC_Request,
+ req_vddc * VOLTAGE_SCALE,
+ NULL);
+}
+
static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->pp_table_version == PP_TABLE_V1)
- phm_apply_dal_min_voltage_request(hwmgr);
-/* TO DO for v0 iceland and Ci*/
+ smu7_apply_minimum_dce_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -3821,7 +3916,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
- performance_level->engine_clock = engine_clock;
+ performance_level->engine_clock = min(engine_clock, data->sclk_cap);
pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
index d9e8b386bd4d..66adabeab6a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
@@ -234,6 +234,7 @@ struct smu7_hwmgr {
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
+ uint32_t sclk_cap;
struct smu7_leakage_voltage vddc_leakage;
struct smu7_leakage_voltage vddci_leakage;
struct smu7_leakage_voltage vddcgfx_leakage;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 40ecaac6c604..30d83e18db40 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -484,52 +484,6 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
return 0;
}
-/**
- * phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings
- *
- * @hwmgr: the address of the powerplay hardware manager.
- */
-int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4);
-
- if (NULL == table_clk_vlt) {
- pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[0].v = 700;
- else
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[1].v = 740;
- else
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- if (hwmgr->chip_id >= CHIP_POLARIS10 &&
- hwmgr->chip_id <= CHIP_VEGAM)
- table_clk_vlt->entries[2].v = 800;
- else
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- if (pptable_info != NULL)
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
uint32_t level = 0;
@@ -540,43 +494,6 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
return level;
}
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table || table->count <= 0
- || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
- || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request,
- req_volt,
- NULL);
- return;
- }
- }
- pr_err("DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index 83b3c9315143..d370bfd0764d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
@@ -87,9 +87,7 @@ extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_t
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk);
-extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index c661185753b4..3ae45eac0c5c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -631,6 +631,7 @@ struct phm_dynamic_state_info {
struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;
struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk;
struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk;
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_display_clock;
struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
struct phm_clock_array *valid_sclk_values;
struct phm_clock_array *valid_mclk_values;
@@ -772,7 +773,6 @@ struct pp_hwmgr {
const struct pp_smumgr_func *smumgr_funcs;
bool is_kicker;
- enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 62ebec1c6fe3..731355bdb9bc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -245,7 +245,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
case 0x67B8:
- case 0x66B0:
+ case 0x67B0:
smu_data->power_tune_defaults = &defaults_hawaii_xt;
break;
case 0x6640:
@@ -543,12 +543,11 @@ static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
if (ci_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
+ (uint32_t *)&smu_data->power_tune_table.TdcWaterfallCtl, SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
@@ -1217,7 +1216,7 @@ static int ci_populate_single_memory_level(
}
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
+ memory_level->EnabledForActivity = 0;
memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
memory_level->VoltageDownH = 0;
@@ -1322,6 +1321,14 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
return result;
}
+ if (data->mclk_dpm_key_disabled && dpm_table->mclk_table.count) {
+ /* Populate the table with the highest MCLK level when MCLK DPM is disabled */
+ for (i = 0; i < dpm_table->mclk_table.count - 1; i++) {
+ levels[i] = levels[dpm_table->mclk_table.count - 1];
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+ }
+
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
dev_id = adev->pdev->device;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index 7c1701ed3e11..dd94e8a9e218 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -25,18 +25,6 @@
#include "amdgpu_smu.h"
-#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x17
-#define SMU11_DRIVER_IF_VERSION_NV10 0x37
-#define SMU11_DRIVER_IF_VERSION_NV12 0x38
-#define SMU11_DRIVER_IF_VERSION_NV14 0x38
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
-#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
-#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
-#define SMU11_DRIVER_IF_VERSION_Cyan_Skillfish 0x8
-
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -148,8 +136,6 @@ int smu_v11_0_setup_pptable(struct smu_context *smu);
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v11_0_check_fw_version(struct smu_context *smu);
-
int smu_v11_0_set_driver_table_location(struct smu_context *smu);
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index fd3937b08662..2346d9c6e162 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
@@ -35,8 +35,6 @@
int smu_v12_0_check_fw_status(struct smu_context *smu);
-int smu_v12_0_check_fw_version(struct smu_context *smu);
-
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index b453e6efc7c9..4eb40ff8aff2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -25,11 +25,6 @@
#include "amdgpu_smu.h"
-#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
-
#define FEATURE_MASK(feature) (1ULL << feature)
/* MP Apertures */
@@ -124,8 +119,6 @@ int smu_v14_0_setup_pptable(struct smu_context *smu);
int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
-int smu_v14_0_check_fw_version(struct smu_context *smu);
-
int smu_v14_0_set_driver_table_location(struct smu_context *smu);
int smu_v14_0_set_tool_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 74c818e3fbd0..54d3dba7d354 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -65,6 +65,8 @@
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x17
+
static const struct smu_feature_bits arcturus_dpm_features = {
.bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1905,7 +1907,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* pptable related */
.setup_pptable = arcturus_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -1958,5 +1960,6 @@ void arcturus_set_ppt_funcs(struct smu_context *smu)
smu->table_map = arcturus_table_map;
smu->pwr_src_map = arcturus_pwr_src_map;
smu->workload_map = arcturus_workload_map;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
smu_v11_0_init_msg_ctl(smu, arcturus_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 4e70308a455e..e6e009df9840 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -582,7 +582,7 @@ cyan_skillfish_get_enabled_mask(struct smu_context *smu,
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.init_smc_tables = cyan_skillfish_init_smc_tables,
@@ -605,5 +605,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = MP1_DRIVER_IF_VERSION;
smu_v11_0_init_msg_ctl(smu, cyan_skillfish_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 163e09ca0730..cd0457e13f54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -73,6 +73,10 @@ static const struct smu_feature_bits navi10_dpm_features = {
#define SMU_11_0_GFX_BUSY_THRESHOLD 15
+#define SMU11_DRIVER_IF_VERSION_NV10 0x37
+#define SMU11_DRIVER_IF_VERSION_NV12 0x38
+#define SMU11_DRIVER_IF_VERSION_NV14 0x38
+
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -3308,7 +3312,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.setup_pptable = navi10_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -3361,11 +3365,26 @@ static const struct pptable_funcs navi10_ppt_funcs = {
void navi10_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &navi10_ppt_funcs;
smu->clock_map = navi10_clk_map;
smu->feature_map = navi10_feature_mask_map;
smu->table_map = navi10_table_map;
smu->pwr_src_map = navi10_pwr_src_map;
smu->workload_map = navi10_workload_map;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ break;
+ case IP_VERSION(11, 0, 9):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
+ case IP_VERSION(11, 0, 5):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ break;
+ }
+
smu_v11_0_init_msg_ctl(smu, navi10_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index cf030af18aad..f799e489b481 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3119,7 +3119,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.setup_pptable = sienna_cichlid_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
@@ -3176,13 +3176,36 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode2_reset = sienna_cichlid_mode2_reset,
};
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
+#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
+
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &sienna_cichlid_ppt_funcs;
smu->clock_map = sienna_cichlid_clk_map;
smu->feature_map = sienna_cichlid_feature_mask_map;
smu->table_map = sienna_cichlid_table_map;
smu->pwr_src_map = sienna_cichlid_pwr_src_map;
smu->workload_map = sienna_cichlid_workload_map;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 7):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
+ break;
+ case IP_VERSION(11, 0, 11):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
+ break;
+ case IP_VERSION(11, 0, 12):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
+ break;
+ case IP_VERSION(11, 0, 13):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
+ break;
+ }
+
smu_v11_0_init_msg_ctl(smu, sienna_cichlid_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 7ca8fdd23206..d68ceee16d8f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -192,81 +192,6 @@ int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v11_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(11, 0, 0):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
- break;
- case IP_VERSION(11, 0, 9):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
- break;
- case IP_VERSION(11, 0, 5):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
- break;
- case IP_VERSION(11, 0, 7):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
- break;
- case IP_VERSION(11, 0, 11):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
- break;
- case IP_VERSION(11, 5, 0):
- case IP_VERSION(11, 5, 2):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
- break;
- case IP_VERSION(11, 0, 12):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
- break;
- case IP_VERSION(11, 0, 13):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
- break;
- case IP_VERSION(11, 0, 8):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
- break;
- case IP_VERSION(11, 0, 2):
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
- break;
- default:
- dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
- amdgpu_ip_version(adev, MP1_HWIP, 0));
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
- break;
- }
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5eabaf55dfc5..d269b505aefb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -2511,7 +2511,7 @@ static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entr
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = vangogh_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -2561,5 +2561,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->table_map = vangogh_table_map;
smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION;
smu_v11_0_init_msg_ctl(smu, vangogh_message_map);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 186020ed6708..75335da224c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1457,7 +1457,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.get_power_profile_mode = renoir_get_power_profile_mode,
.read_sensor = renoir_read_sensor,
.check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index ac5e44dff6c9..f09da4d14510 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -70,42 +70,6 @@ int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v12_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!smu->is_apu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index e38354c694c9..d0a8df1aa6b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -229,66 +229,6 @@ int smu_v14_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-int smu_v14_0_check_fw_version(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint8_t smu_program, smu_major, smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- smu_major = (smu_version >> 16) & 0xff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
- adev->pm.fw_version = smu_version;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 0):
- case IP_VERSION(14, 0, 4):
- case IP_VERSION(14, 0, 5):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
- break;
- case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
- break;
- case IP_VERSION(14, 0, 2):
- case IP_VERSION(14, 0, 3):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
- break;
- default:
- dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- amdgpu_ip_version(adev, MP1_HWIP, 0));
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
- break;
- }
-
- if (adev->pm.fw)
- dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a verbal message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_program, smu_version, smu_major, smu_minor, smu_debug);
- }
-
- return ret;
-}
-
static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 2353524b8821..a28624d4847a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -65,6 +65,9 @@
#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+
#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700
#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678
#define SMU_14_0_0_UMD_PSTATE_FCLK 1800
@@ -1699,7 +1702,7 @@ static int smu_v14_0_0_restore_user_od_settings(struct smu_context *smu)
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
- .check_fw_version = smu_v14_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.init_smc_tables = smu_v14_0_0_init_smc_tables,
.fini_smc_tables = smu_v14_0_0_fini_smc_tables,
.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
@@ -1750,10 +1753,23 @@ static void smu_v14_0_0_init_msg_ctl(struct smu_context *smu)
void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v14_0_0_ppt_funcs;
smu->feature_map = smu_v14_0_0_feature_mask_map;
smu->table_map = smu_v14_0_0_table_map;
smu->is_apu = true;
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ }
+
smu_v14_0_0_init_msg_ctl(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index c3ebfac062a7..31f9566f7979 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -68,6 +68,8 @@ static const struct smu_feature_bits smu_v14_0_2_dpm_features = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT) }
};
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
+
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
@@ -2798,7 +2800,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.fini_power = smu_v14_0_fini_power,
.check_fw_status = smu_v14_0_check_fw_status,
.setup_pptable = smu_v14_0_2_setup_pptable,
- .check_fw_version = smu_v14_0_check_fw_version,
+ .check_fw_version = smu_cmn_check_fw_version,
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.system_features_control = smu_v14_0_system_features_control,
.set_allowed_mask = smu_v14_0_set_allowed_mask,
@@ -2863,5 +2865,6 @@ void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v14_0_2_table_map;
smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
smu->workload_map = smu_v14_0_2_workload_map;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
smu_v14_0_2_init_msg_ctl(smu);
}