Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 545
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/include/bios_parser_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 37
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 9
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 122
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 69
-rw-r--r--  drivers/gpu/drm/drm_pagemap.c | 17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_gem.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 13
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 52
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 38
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 29
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 84
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 20
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 23
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 20
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 47
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 137
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_format.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_debug.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.h | 9
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 36
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 68
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_perf.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/atom.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 3
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 90
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 2
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 10
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 14
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 17
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_helper.h | 9
-rw-r--r--  drivers/gpu/drm/tests/drm_atomic_state_test.c | 40
-rw-r--r--  drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c | 143
-rw-r--r--  drivers/gpu/drm/tidss/tidss_kms.c | 30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_dma_buf.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_eu_stall.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_freq.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_throttle.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 35
-rw-r--r--  drivers/gpu/drm/xe/xe_heci_gsc.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_vfio.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 51
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 1
137 files changed, 1602 insertions(+), 1200 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f9774f58ce1..b20a06abb65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,8 @@ extern int amdgpu_rebar;
extern int amdgpu_wbrf;
extern int amdgpu_user_queue;
+extern uint amdgpu_hdmi_hpd_debounce_delay_ms;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 58c3ffe707d1..d2c3885de711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3445,11 +3445,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD/VPE, it's handled specially */
+ /* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@@ -5064,6 +5063,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ /*
+ * The device went through surprise hotplug; destroy the KFD topology
+ * before ip_fini_early(), which would otherwise trigger KFD locking/refcount
+ * issues by calling amdgpu_amdkfd_suspend()
+ */
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
@@ -5867,6 +5874,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
if (ret)
goto mode1_reset_failed;
+ /* re-enable mmio access once mode 1 reset has completed */
+ adev->no_hw_access = false;
+
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)
@@ -6613,6 +6623,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
int r = 0;
bool need_emergency_restart = false;
+ /* save the pasid here as the job may be freed before the end of the reset */
+ int pasid = job ? job->pasid : -EINVAL;
/*
* If it reaches here because of hang/timeout and a RAS error is
@@ -6713,8 +6725,12 @@ end_reset:
if (!r) {
struct amdgpu_task_info *ti = NULL;
- if (job)
- ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+ /*
+ * The job may already be freed at this point via the sched TDR workqueue,
+ * so use the cached pasid.
+ */
+ if (pasid >= 0)
+ ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
ti ? &ti->task : NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b5d34797d606..52bc04452812 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1880,7 +1880,12 @@ int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb)
{
struct amdgpu_bo *abo;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_framebuffer *fb;
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ fb = plane->state->fb;
+ else
+ fb = plane->fb;
if (!fb)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e22cfa7c6d32..c1461317eb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -95,18 +95,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
attach->peer2peer = false;
- /*
- * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
- * Such buffers cannot be safely accessed over P2P due to device-local
- * compression metadata. Fallback to system-memory path instead.
- * Device supports GFX12 (GC 12.x or newer)
- * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
- *
- */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
- bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
- attach->peer2peer = false;
-
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2dfbddcef9ab..6ccb80e2d7c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -33,6 +33,7 @@
#include <drm/drm_vblank.h>
#include <linux/cc_platform.h>
+#include <linux/console.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
@@ -246,6 +247,7 @@ int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
int amdgpu_rebar = -1; /* auto */
int amdgpu_user_queue = -1;
+uint amdgpu_hdmi_hpd_debounce_delay_ms;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1122,6 +1124,16 @@ module_param_named(rebar, amdgpu_rebar, int, 0444);
MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+/*
+ * DOC: hdmi_hpd_debounce_delay_ms (uint)
+ * HDMI HPD disconnect debounce delay in milliseconds.
+ *
+ * Used to filter short disconnect->reconnect HPD toggles some HDMI sinks
+ * generate while entering/leaving power save. Defaults to 0 (disabled).
+ */
+MODULE_PARM_DESC(hdmi_hpd_debounce_delay_ms, "HDMI HPD disconnect debounce delay in milliseconds (0 = disabled (default); 1500 is a common value)");
+module_param_named(hdmi_hpd_debounce_delay_ms, amdgpu_hdmi_hpd_debounce_delay_ms, uint, 0644);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2704,7 +2716,9 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ if (console_suspend_enabled &&
+ !pm_hibernate_is_recovering() &&
+ !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
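Note: the new hdmi_hpd_debounce_delay_ms parameter follows the standard module_param_named() idiom, with the consumer clamping the user-supplied value. A minimal stand-alone sketch of the same pattern — demo_* and DEMO_MAX_DELAY_MS are hypothetical names, not part of this patch:

    #include <linux/module.h>
    #include <linux/minmax.h>

    #define DEMO_MAX_DELAY_MS 5000

    static uint demo_delay_ms; /* 0 = disabled, matching the amdgpu default */
    module_param_named(delay_ms, demo_delay_ms, uint, 0644);
    MODULE_PARM_DESC(delay_ms, "debounce delay in ms (0 = disabled)");

    static int __init demo_init(void)
    {
            /* clamp the user value, as amdgpu_dm does with min() below */
            uint effective = min(demo_delay_ms, (uint)DEMO_MAX_DELAY_MS);

            pr_info("demo: effective delay %u ms\n", effective);
            return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void) { }
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");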
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c7843e336310..06c333b2213b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -89,6 +89,16 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
+static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
+{
+ af->fence_wptr_start = af->ring->wptr;
+}
+
+static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
+{
+ af->fence_wptr_end = af->ring->wptr;
+}
+
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@@ -116,8 +126,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
+ amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_fence_wptr_end(af);
amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@@ -709,6 +721,7 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
+ bool reemitted = false;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@@ -726,7 +739,9 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
- if (fence == af)
+ if (fence->reemitted > 1)
+ reemitted = true;
+ else if (fence == af)
dma_fence_set_error(&fence->base, -ETIME);
else if (fence->context == af->context)
dma_fence_set_error(&fence->base, -ECANCELED);
@@ -734,9 +749,12 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
- /* signal the guilty fence */
- amdgpu_fence_write(ring, (u32)af->base.seqno);
- amdgpu_fence_process(ring);
+
+ if (reemitted) {
+ /* if we've already reemitted once then just cancel everything */
+ amdgpu_fence_driver_force_completion(af->ring);
+ af->ring->ring_backup_entries_to_copy = 0;
+ }
}
void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
@@ -784,10 +802,18 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
- if (!guilty_fence || (fence->context != guilty_fence->context))
+ if (!fence->reemitted &&
+ (!guilty_fence || (fence->context != guilty_fence->context))) {
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
+ } else if (!fence->reemitted) {
+ /* always save the fence */
+ amdgpu_ring_backup_unprocessed_command(ring,
+ fence->fence_wptr_start,
+ fence->fence_wptr_end);
+ }
wptr = fence->wptr;
+ fence->reemitted++;
}
rcu_read_unlock();
} while (last_seq != seq);
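Note: the fence bookkeeping added above records the ring write pointer immediately before and after the fence packet (fence_wptr_start/fence_wptr_end) so that, once a submission has already been reemitted, only the fence packet itself is copied back into the ring. A minimal sketch of that span-capture idea, with hypothetical stand-alone types:

    struct demo_ring { unsigned long long wptr; };
    struct demo_span { unsigned long long start, end; };

    static void demo_emit_fence(struct demo_ring *ring, struct demo_span *span)
    {
            span->start = ring->wptr;   /* wptr before the fence packet */
            ring->wptr += 6;            /* pretend the packet is 6 dwords */
            span->end = ring->wptr;     /* wptr after the fence packet */
    }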
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d2237ce9da70..1485f4789440 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -375,7 +375,7 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @start_page: first page to map in the GART aperture
* @num_pages: number of pages to be mapped
* @flags: page table entry flags
- * @dst: CPU address of the GART table
+ * @dst: valid CPU address of the GART table; must not be NULL
*
* Binds a BO that is allocated in VRAM to the GART page table
* (all ASICs).
@@ -396,7 +396,7 @@ void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
return;
for (i = 0; i < num_pages; ++i) {
- amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ amdgpu_gmc_set_pte_pde(adev, dst,
start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 869bceb0fe2c..8924380086c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -732,6 +732,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
return 0;
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
+
+ if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
+ return 0;
+
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 37270c4dab8d..532f83d783d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -318,12 +318,36 @@ void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
+static int isp_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_resume)
+ return isp->funcs->hw_resume(isp);
+
+ return -ENODEV;
+}
+
+static int isp_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_suspend)
+ return isp->funcs->hw_suspend(isp);
+
+ return -ENODEV;
+}
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
.is_idle = isp_is_idle,
+ .suspend = isp_suspend,
+ .resume = isp_resume,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
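Note: the new suspend/resume IP callbacks use the usual optional-hook dispatch pattern — call through the funcs table when the backend provides the hook, otherwise return -ENODEV. A generic user-space sketch of the pattern (demo_* names are hypothetical):

    #include <errno.h>

    struct demo_funcs {
            int (*hw_suspend)(void *ctx);   /* optional */
            int (*hw_resume)(void *ctx);    /* optional */
    };

    static int demo_suspend(const struct demo_funcs *funcs, void *ctx)
    {
            /* only dispatch when the backend populated the hook */
            if (funcs->hw_suspend)
                    return funcs->hw_suspend(ctx);
            return -ENODEV;
    }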
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index d6f4ffa4c97c..9a5d2b1dff9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -38,6 +38,8 @@ struct amdgpu_isp;
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
+ int (*hw_suspend)(struct amdgpu_isp *isp);
+ int (*hw_resume)(struct amdgpu_isp *isp);
};
struct amdgpu_isp {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6ee77f431d56..f65edd80cabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -201,6 +201,9 @@ static enum amd_ip_block_type
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
+ case AMDGPU_HW_IP_VPE:
+ type = AMD_IP_BLOCK_TYPE_VPE;
+ break;
default:
type = AMD_IP_BLOCK_TYPE_NUM;
break;
@@ -721,6 +724,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMD_IP_BLOCK_TYPE_UVD:
count = adev->uvd.num_uvd_inst;
break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ count = adev->vpe.num_instances;
+ break;
/* For all other IP block types not listed in the switch statement
* the ip status is valid here and the instance count is one.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7a27c6c4bb44..055437d4edf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -144,10 +144,15 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
ktime_t start_timestamp;
- /* wptr for the fence for resets */
+ /* wptr for the total submission for resets */
u64 wptr;
/* fence context for resets */
u64 context;
+ /* number of times this fence has been reemitted */
+ unsigned int reemitted;
+ /* wptr span of the fence packet itself within the submission */
+ u64 fence_wptr_start;
+ u64 fence_wptr_end;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 9a969175900e..58b26c78b642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -885,12 +885,28 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
return 0;
}
+bool amdgpu_userq_enabled(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ return true;
+ }
+
+ return false;
+}
+
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index c37444427a14..b48b3bc293fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -141,6 +141,7 @@ uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
struct drm_file *filp);
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+bool amdgpu_userq_enabled(struct drm_device *dev);
int amdgpu_userq_suspend(struct amdgpu_device *adev);
int amdgpu_userq_resume(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index eba9fb359047..85e9edc1cb6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -141,6 +141,8 @@ static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
+ dma_fence_put(userq->last_fence);
+
amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
xa_destroy(&userq->fence_drv_xa);
/* Drop the fence_drv reference held by user queue */
@@ -471,6 +473,9 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_exec exec;
u64 wptr;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
size_mul(sizeof(u32), num_syncobj_handles));
@@ -653,6 +658,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
int r, i, rentry, wentry, cnt;
struct drm_exec exec;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
size_mul(sizeof(u32), num_read_bo_handles));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c362d4dfb5bb..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1069,9 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked &&
- /* SI doesn't support pasid or KIQ/MES */
- params->adev->family > AMDGPU_FAMILY_SI) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8ad7519f7b58..f1ee3921d970 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1235,16 +1235,16 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_WC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
break;
case AMDGPU_VM_MTYPE_RW:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
break;
case AMDGPU_VM_MTYPE_CC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
break;
case AMDGPU_VM_MTYPE_UC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
break;
}
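Note: the gmc_v9_0 hunk above fixes a read-modify-write bug. AMDGPU_PTE_MTYPE_VG10() masks out the old MTYPE field before inserting the new value, so OR-ing its result back into *flags keeps stale field bits; plain assignment is required. A small demonstration with a hypothetical 3-bit field at bits [4:2]:

    #include <assert.h>
    #include <stdint.h>

    #define FLD_MASK          (0x7ull << 2)
    #define SET_FLD(flags, v) (((flags) & ~FLD_MASK) | ((uint64_t)(v) << 2))

    int main(void)
    {
            uint64_t flags = SET_FLD(0, 0x3);             /* field = 0b011 */
            uint64_t bad   = flags | SET_FLD(flags, 0x4); /* |= keeps old bits */
            uint64_t good  = SET_FLD(flags, 0x4);         /* = replaces field */

            assert(((bad  >> 2) & 0x7) == 0x7);  /* 0b011 | 0b100 = 0b111 */
            assert(((good >> 2) & 0x7) == 0x4);
            return 0;
    }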
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4258d3e0b706..0002bcc6c4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -26,6 +26,7 @@
*/
#include <linux/gpio/machine.h>
+#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@@ -145,6 +146,9 @@ static int isp_genpd_add_device(struct device *dev, void *data)
return -ENODEV;
}
+ /* The devices will be managed by the pm ops from the parent */
+ dev_pm_syscore_device(dev, true);
+
exit:
/* Continue to add */
return 0;
@@ -177,12 +181,47 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
return -ENODEV;
}
+ dev_pm_syscore_device(dev, false);
exit:
/* Continue to remove */
return 0;
}
+static int isp_suspend_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int isp_resume_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int isp_v4_1_1_hw_suspend(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_suspend_device);
+ if (r)
+ dev_err(isp->parent, "failed to suspend hw devices (%d)\n", r);
+
+ return r;
+}
+
+static int isp_v4_1_1_hw_resume(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_resume_device);
+ if (r)
+ dev_err(isp->parent, "failed to resume hw devices (%d)\n", r);
+
+ return r;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
const struct software_node *amd_camera_node, *isp4_node;
@@ -369,6 +408,8 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
static const struct isp_funcs isp_v4_1_1_funcs = {
.hw_init = isp_v4_1_1_hw_init,
.hw_fini = isp_v4_1_1_hw_fini,
+ .hw_suspend = isp_v4_1_1_hw_suspend,
+ .hw_resume = isp_v4_1_1_hw_resume,
};
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d7a2e7178ea9..625ea8ab7a74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,14 +1209,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes)
pdd->last_evict_timestamp = get_jiffies_64();
- retval = suspend_all_queues_mes(dqm);
- if (retval) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
- }
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1246,10 +1240,6 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
- } else {
- retval = resume_all_queues_mes(dqm);
- if (retval)
- dev_err(dev, "Resuming all queues failed");
}
out:
@@ -2919,6 +2909,14 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
+ struct kfd_mem_obj *mqd)
+{
+ WARN(!mqd, "No hiq sdma mqd trunk to free");
+
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+}
+
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@@ -3042,19 +3040,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
return dqm;
}
+ if (!dev->kfd->shared_resources.enable_mes)
+ deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
+
out_free:
kfree(dqm);
return NULL;
}
-static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
- struct kfd_mem_obj *mqd)
-{
- WARN(!mqd, "No hiq sdma mqd trunk to free");
-
- amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
-}
-
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.stop(dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index a499449fcb06..d2bc169e84b0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -312,7 +312,7 @@ void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), pid,
- node->id, 0));
+ node->id, '0'));
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 740711ac1037..1ea5a250440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5266,6 +5266,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
+ int real_brightness;
+ int init_brightness;
if (aconnector->bl_idx == -1)
return;
@@ -5290,6 +5292,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
+ init_brightness = props.brightness;
+
if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
props.scale = BACKLIGHT_SCALE_NON_LINEAR;
@@ -5308,8 +5312,20 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
- } else
+ } else {
+ /*
+ * dm->brightness[x] can be inconsistent just after startup until
+ * ops.get_brightness is called.
+ */
+ real_brightness =
+ amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);
+
+ if (real_brightness != init_brightness) {
+ dm->actual_brightness[aconnector->bl_idx] = real_brightness;
+ dm->brightness[aconnector->bl_idx] = real_brightness;
+ }
drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
+ }
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5626,7 +5642,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
- drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ aconnector->base.name,
link->psr_settings.psr_feature_enabled,
link->psr_settings.psr_version,
link->dpcd_caps.psr_info.psr_version,
@@ -8930,9 +8947,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
- aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
- INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
- aconnector->hdmi_prev_sink = NULL;
+ /*
+ * If HDMI HPD debounce delay is set, use the minimum between selected
+ * value and AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS
+ */
+ if (amdgpu_hdmi_hpd_debounce_delay_ms) {
+ aconnector->hdmi_hpd_debounce_delay_ms = min(amdgpu_hdmi_hpd_debounce_delay_ms,
+ AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS);
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+ } else {
+ aconnector->hdmi_hpd_debounce_delay_ms = 0;
+ }
/*
* configure support for HPD hot plug; connector->polled default value is 0
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index bd0403005f37..beb0d04d3e68 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,7 +59,10 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
-#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
+/*
+ * Maximum HDMI HPD debounce delay in milliseconds
+ */
+#define AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS 5000
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index d1471f34e419..9f11e6ca4051 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -763,14 +763,14 @@ static enum bp_result bios_parser_encoder_control(
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac1_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
} else if (cntl->engine_id == ENGINE_ID_DACB) {
if (!bp->cmd_tbl.dac2_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac2_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 22457f417e65..76a3559f0ddc 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -1797,7 +1797,30 @@ static enum bp_result select_crtc_source_v3(
&params.ucEncodeMode))
return BP_RESULT_BADINPUT;
- params.ucDstBpc = bp_params->bit_depth;
+ switch (bp_params->color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ params.ucDstBpc = PANEL_BPC_UNDEFINE;
+ break;
+ case COLOR_DEPTH_666:
+ params.ucDstBpc = PANEL_6BIT_PER_COLOR;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ params.ucDstBpc = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucDstBpc = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucDstBpc = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_141414:
+ dm_error("14-bit color not supported by SelectCRTC_Source v3\n");
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucDstBpc = PANEL_16BIT_PER_COLOR;
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
result = BP_RESULT_OK;
@@ -1815,12 +1838,12 @@ static enum bp_result select_crtc_source_v3(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
@@ -1846,12 +1869,15 @@ static void init_dac_encoder_control(struct bios_parser *bp)
static void dac_encoder_control_prepare_params(
DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
params->ucDacStandard = dac_standard;
- if (enable)
+ if (action == ENCODER_CONTROL_SETUP ||
+ action == ENCODER_CONTROL_INIT)
+ params->ucAction = ATOM_ENCODER_INIT;
+ else if (action == ENCODER_CONTROL_ENABLE)
params->ucAction = ATOM_ENABLE;
else
params->ucAction = ATOM_DISABLE;
@@ -1864,7 +1890,7 @@ static void dac_encoder_control_prepare_params(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1873,7 +1899,7 @@ static enum bp_result dac1_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
@@ -1885,7 +1911,7 @@ static enum bp_result dac1_encoder_control_v1(
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1894,7 +1920,7 @@ static enum bp_result dac2_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index e89b1ba0048b..78bdbcaa61c8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -57,12 +57,12 @@ struct cmd_tbl {
struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac2_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac1_output_control)(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
index b015e80672ec..fcd3ab4b0045 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
@@ -41,7 +41,7 @@
/* kHZ*/
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
/* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 340000
struct dp_hdmi_dongle_signature_data {
int8_t id[15];/* "DP-HDMI ADAPTOR"*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index b357683b4255..268b5fbdb48b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -30,7 +30,11 @@ dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- frame_warn_limit := 3072
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
else
frame_warn_limit := 2048
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 8d24763938ea..1df3412be346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -77,32 +77,14 @@ static unsigned int dscceComputeDelay(
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
-// Super monster function with some 45 argument
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -116,7 +98,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -124,9 +105,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -135,14 +113,7 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix);
+ bool *NotEnoughTimeForDynamicMetadata);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
@@ -294,62 +265,23 @@ static void CalculateDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ enum clock_change_support *DRAMClockChangeSupport);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
@@ -810,29 +742,12 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -846,7 +761,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -854,9 +768,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -865,15 +776,10 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix)
+ bool *NotEnoughTimeForDynamicMetadata)
{
+ struct vba_vars_st *v = &mode_lib->vba;
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
@@ -905,26 +811,26 @@ static bool CalculatePrefetchSchedule(
double Tdmec = 0;
double Tdmsks = 0;
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMInefficiencyFactor = v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
- ProgressiveToInterlaceUnitInOPP,
+ v->ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
@@ -932,16 +838,16 @@ static bool CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
- *Tdmdl = TWait + Tvm_trips + trip_to_mem;
+ if (v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true) {
+ v->Tdmdl[k] = TWait + Tvm_trips + trip_to_mem;
} else {
- *Tdmdl = TWait + UrgentExtraLatency;
+ v->Tdmdl[k] = TWait + UrgentExtraLatency;
}
- if (DynamicMetadataEnable == true) {
- if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
+ if (v->DynamicMetadataEnable[k] == true) {
+ if (VStartup * LineTime < Tsetup + v->Tdmdl[k] + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
} else {
*NotEnoughTimeForDynamicMetadata = false;
@@ -949,39 +855,39 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ v->Tdmdl_vm[k] = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
- *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ v->DSTXAfterScaler[k] = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
- *DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
+ v->DSTXAfterScaler[k] = v->DSTXAfterScaler[k] + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
- *DSTYAfterScaler = 1;
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && v->ProgressiveToInterlaceUnitInOPP))
+ v->DSTYAfterScaler[k] = 1;
else
- *DSTYAfterScaler = 0;
+ v->DSTYAfterScaler[k] = 0;
- DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = v->DSTYAfterScaler[k] * myPipe->HTotal + v->DSTXAfterScaler[k];
+ v->DSTYAfterScaler[k] = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ v->DSTXAfterScaler[k] = DSTTotalPixelsAfterScaler - ((double) (v->DSTYAfterScaler[k] * myPipe->HTotal));
MyError = false;
@@ -990,33 +896,33 @@ static bool CalculatePrefetchSchedule(
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
- if (GPUVMEnable) {
- if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
+ if (v->GPUVMEnable) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
+ v->Tno_bw[k] = UrgentExtraLatency + trip_to_mem * ((v->GPUVMMaxPageTableLevels - 2) - 1);
} else
- *Tno_bw = 0;
+ v->Tno_bw[k] = 0;
} else if (!myPipe->DCCEnable)
- *Tno_bw = LineTime;
+ v->Tno_bw[k] = LineTime;
else
- *Tno_bw = LineTime / 4;
+ v->Tno_bw[k] = LineTime / 4;
- dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, v->Tdmdl[k])) / LineTime
+ - (v->DSTYAfterScaler[k] + v->DSTXAfterScaler[k] / myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
- prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
+ prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k]) / Tsw_oto;
- if (GPUVMEnable == true) {
- Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ if (v->GPUVMEnable == true) {
+ Tvm_oto = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
@@ -1042,10 +948,10 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
- dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
- dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
+ dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", v->Tdmdl_vm[k]);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
+ dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", v->DSTXAfterScaler[k]);
+ dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)v->DSTYAfterScaler[k]);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -1059,26 +965,26 @@ static bool CalculatePrefetchSchedule(
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
- if (Tpre_rounded - *Tno_bw > 0)
+ if (Tpre_rounded - v->Tno_bw[k] > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
- + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
- / (Tpre_rounded - *Tno_bw);
+ + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
+ / (Tpre_rounded - v->Tno_bw[k]);
else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
- PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
+ if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]) > 0) {
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]);
}
- if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ if (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
- BytePerPixelC) /
- (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
+ v->BytePerPixelC[k]) /
+ (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
@@ -1086,7 +992,7 @@ static bool CalculatePrefetchSchedule(
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
- swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
+ swath_width_chroma_ub * v->BytePerPixelC[k]) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
@@ -1096,7 +1002,7 @@ static bool CalculatePrefetchSchedule(
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
- PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
+ PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
@@ -1107,7 +1013,7 @@ static bool CalculatePrefetchSchedule(
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
@@ -1118,7 +1024,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth2 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
@@ -1129,7 +1035,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth3 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
@@ -1152,13 +1058,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable) {
- Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
+ if (v->GPUVMEnable) {
+ Tvm_equ = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable || myPipe->DCCEnable)) {
+ if ((v->GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -1227,7 +1133,7 @@ static bool CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
- *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
+ *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * v->BytePerPixelC[k] * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
@@ -1243,9 +1149,9 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
- dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
+ dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
@@ -1276,7 +1182,7 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
- *prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
+ v->prefetch_vmrow_bw[k] = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
@@ -2437,30 +2343,12 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelay[k],
- v->DPPCLKDelaySubtotal
- + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
@@ -2474,7 +2362,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
- v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
@@ -2482,9 +2369,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
@@ -2493,14 +2377,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
- &v->NotEnoughTimeForDynamicMetadata[k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NotEnoughTimeForDynamicMetadata[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
@@ -2730,62 +2607,23 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
- v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &DRAMClockChangeSupport);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -4770,29 +4608,12 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelayPerState[i][k],
- v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
@@ -4806,7 +4627,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
- v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
@@ -4814,9 +4634,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
@@ -4825,14 +4642,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &v->NoTimeForDynamicMetadata[i][j][k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NoTimeForDynamicMetadata[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5007,62 +4817,23 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
- v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->DRAMClockChangeSupport[i][j]);
}
}
@@ -5179,63 +4950,25 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ enum clock_change_support *DRAMClockChangeSupport)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
@@ -5254,101 +4987,101 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
- mode_lib->vba.TotalActiveDPP = 0;
- mode_lib->vba.TotalDCCActiveDPP = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
- if (DCCEnable[k] == true) {
- mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
+ v->TotalActiveDPP = 0;
+ v->TotalDCCActiveDPP = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalActiveDPP = v->TotalActiveDPP + DPPPerPlane[k];
+ if (v->DCCEnable[k] == true) {
+ v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + DPPPerPlane[k];
}
}
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->UrgentWatermark;
- mode_lib->vba.TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
- mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ v->TotalActiveWriteback = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+ v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
- mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ v->LBLatencyHidingSourceLinesY = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
- mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ v->LBLatencyHidingSourceLinesC = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
- LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
- ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
+ if (v->WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
- if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
- WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
+ WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
- mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
- mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
+ v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5356,40 +5089,40 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->FinalDRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
- SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
- mode_lib->vba.TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
- mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
+ v->TotalNumberOfActiveOTG = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (v->MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
+ } else if (((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
- TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
}
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
+ v->StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ v->StutterEnterPlusExitWatermark = dml_max(v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}
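
The hunks above fold dozens of scalar and pointer parameters into the shared struct vba_vars_st reached through v = &mode_lib->vba, with the plane index k selecting per-plane entries. A minimal sketch of the pattern follows; the field list is abridged and the calc_*() functions are hypothetical stand-ins for CalculateWatermarksAndDRAMSpeedChangeSupport:

	/* Illustrative only: fields mirror a few of those used above. */
	struct vba_vars_st {
		double UrgentWatermark;
		double FinalDRAMClockChangeLatency;
		double DRAMClockChangeWatermark;
	};

	struct display_mode_lib {
		struct vba_vars_st vba;
	};

	/* Before: inputs and outputs travel through a very long parameter list. */
	static void calc_old(double UrgentLatency, double ExtraLatency,
			     double DRAMClockChangeLatency,
			     double *UrgentWatermark, double *DRAMClockChangeWatermark)
	{
		*UrgentWatermark = UrgentLatency + ExtraLatency;
		*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
	}

	/* After: the function reads and writes mode_lib->vba directly. */
	static void calc_new(struct display_mode_lib *mode_lib,
			     double UrgentLatency, double ExtraLatency)
	{
		struct vba_vars_st *v = &mode_lib->vba;

		v->UrgentWatermark = UrgentLatency + ExtraLatency;
		v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency +
					      v->UrgentWatermark;
	}
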
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4986f12dc9df..ebd74b43e935 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1118,13 +1118,13 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
num_audio++;
}
+ if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa) {
+ /* wake AZ from D3 before accessing the AZ endpoint */
+ clk_mgr->funcs->enable_pme_wa(clk_mgr);
+ }
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa)
- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- clk_mgr->funcs->enable_pme_wa(clk_mgr);
-
link_hwss->enable_audio_packet(pipe_ctx);
if (pipe_ctx->stream_res.audio)
@@ -1610,38 +1610,12 @@ dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
struct dc_bios *bios = link->ctx->dc_bios;
struct bp_crtc_source_select crtc_source_select = {0};
enum engine_id engine_id = link->link_enc->preferred_engine;
- uint8_t bit_depth;
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
engine_id = link->link_enc->analog_engine;
- switch (pipe_ctx->stream->timing.display_color_depth) {
- case COLOR_DEPTH_UNDEFINED:
- bit_depth = 0;
- break;
- case COLOR_DEPTH_666:
- bit_depth = 6;
- break;
- default:
- case COLOR_DEPTH_888:
- bit_depth = 8;
- break;
- case COLOR_DEPTH_101010:
- bit_depth = 10;
- break;
- case COLOR_DEPTH_121212:
- bit_depth = 12;
- break;
- case COLOR_DEPTH_141414:
- bit_depth = 14;
- break;
- case COLOR_DEPTH_161616:
- bit_depth = 16;
- break;
- }
-
crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
- crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
crtc_source_select.engine_id = engine_id;
crtc_source_select.sink_signal = pipe_ctx->stream->signal;
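
Rather than collapsing display_color_depth into a raw bit count at the call site, the hunk above passes the enum through bp_crtc_source_select (see the bios_parser_types.h change below) so the BIOS command table can do the translation. A hedged sketch of the translation the command table is now expected to perform, mirroring the switch removed above; the helper name is hypothetical:

	static uint8_t color_depth_to_bpc(enum dc_color_depth depth)
	{
		switch (depth) {
		case COLOR_DEPTH_UNDEFINED: return 0;
		case COLOR_DEPTH_666:       return 6;
		case COLOR_DEPTH_101010:    return 10;
		case COLOR_DEPTH_121212:    return 12;
		case COLOR_DEPTH_141414:    return 14;
		case COLOR_DEPTH_161616:    return 16;
		case COLOR_DEPTH_888:
		default:                    return 8;
		}
	}
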
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 6d31f4967f1a..7fa6bc97a919 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -336,7 +336,7 @@ static void query_dp_dual_mode_adaptor(
/* Assume we have no valid DP passive dongle connected */
*dongle = DISPLAY_DONGLE_NONE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
if (!i2c_read(
@@ -392,6 +392,8 @@ static void query_dp_dual_mode_adaptor(
}
}
+ if (is_valid_hdmi_signature)
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
if (is_type2_dongle) {
uint32_t max_tmds_clk =
@@ -932,7 +934,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum engine_id engine_id = link_enc->preferred_engine;
enum dal_device_type device_type = DEVICE_TYPE_CRT;
- enum bp_result bp_result;
+ enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t enum_id;
switch (engine_id) {
@@ -946,7 +948,9 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
break;
}
- bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ if (bios->funcs->dac_load_detection)
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+
return bp_result == BP_RESULT_OK;
}
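
The detection flow now starts from the conservative DVI TMDS limit and only raises the cap once the dongle proves itself, and the DAC load-detect path tolerates BIOS implementations that do not provide the callback. A condensed, hedged paraphrase of the resulting flow:

	/* Assume no valid DP passive dongle: default to the DVI TMDS limit,
	 * and upgrade only on a verified HDMI signature. */
	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
	/* ... I2C reads of the dongle ID ... */
	if (is_valid_hdmi_signature)
		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
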
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ef69898d2cc5..d056e5fd5458 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -203,12 +203,12 @@ enum dcn35_clk_src_array_id {
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\
- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define bios_regs_init() \
( \
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index f3c614c4490c..9fab3169069c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -183,12 +183,12 @@ enum dcn351_clk_src_array_id {
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\
- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define bios_regs_init() \
( \
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 973b6bdbac63..f40dc612ec73 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -136,7 +136,7 @@ struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
enum signal_type sink_signal;
- uint8_t bit_depth;
+ enum dc_color_depth color_depth;
};
struct bp_transmitter_control {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7c9f77124ab2..c4966dcc6875 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2455,24 +2455,21 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
- pptable->PcieLaneCount[i] > pcie_width_cap) {
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
- pptable->PcieGenSpeed[i] > pcie_gen_cap ?
- pcie_gen_cap : pptable->PcieGenSpeed[i];
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
- pptable->PcieLaneCount[i] > pcie_width_cap ?
- pcie_width_cap : pptable->PcieLaneCount[i];
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_gen_cap << 8;
- smu_pcie_arg |= pcie_width_cap;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- break;
- }
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_gen[i] << 8;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_lane[i];
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
}
return ret;
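
Two behavioral fixes here: the override message is now sent for every link level (previously it was skipped whenever the pptable values were already within caps, leaving the SMU at its defaults), and the message carries the clamped table values rather than the raw caps. The argument packing, restated as a hedged helper; the function name is hypothetical but the bit layout matches the smu_pcie_arg assembly above:

	static inline uint32_t navi10_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
	{
		return (level << 16) | ((uint32_t)gen << 8) | lanes;
	}
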
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 677781060246..eaeff6a9bc50 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2923,8 +2923,13 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
break;
}
- if (!ret)
+ if (!ret) {
+ /* disable mmio access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+ }
return ret;
}
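
Setting adev->no_hw_access before the reset sleep tells the register accessors to refuse MMIO while the device is in mode-1 reset; the smp_mb() makes the flag visible to other CPUs before any of them can issue an access. A hedged sketch of the accessor side of the contract; amdgpu's real accessors implement an equivalent check:

	static u32 guarded_rreg32(struct amdgpu_device *adev, u32 reg)
	{
		if (adev->no_hw_access)
			return 0;	/* device is mid mode-1 reset; skip MMIO */
		return readl(adev->rmmio + reg * 4);
	}
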
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index f9b0938c57ea..f2a16dfee599 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1939,6 +1939,11 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
dev_err(smu->adev->dev, "Set soft max sclk failed!");
return ret;
}
+ if (smu->gfx_actual_hard_min_freq != smu->gfx_default_hard_min_freq ||
+ smu->gfx_actual_soft_max_freq != smu->gfx_default_soft_max_freq)
+ smu->user_dpm_profile.user_od = true;
+ else
+ smu->user_dpm_profile.user_od = false;
break;
default:
return -ENOSYS;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index b1bd946d8e30..97414bc39764 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1514,9 +1514,10 @@ static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
- smu->gfx_actual_hard_min_freq = 0;
- smu->gfx_actual_soft_max_freq = 0;
-
+ if (smu->gfx_actual_hard_min_freq == 0)
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ if (smu->gfx_actual_soft_max_freq == 0)
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
return 0;
}
@@ -1526,8 +1527,10 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
- smu->gfx_actual_hard_min_freq = 0;
- smu->gfx_actual_soft_max_freq = 0;
+ if (smu->gfx_actual_hard_min_freq == 0)
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ if (smu->gfx_actual_soft_max_freq == 0)
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
return 0;
}
@@ -1665,6 +1668,29 @@ static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
return ret;
}
+static int smu_v14_0_0_restore_user_od_settings(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to restore hard min sclk!\n");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to restore soft max sclk!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1688,6 +1714,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
+ .restore_user_od_settings = smu_v14_0_0_restore_user_od_settings,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
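
Together with the smu_v14_0.c hunk above, this completes user overdrive persistence across reset: od_edit_dpm_table records whether the user left the defaults, the fine-grain setup no longer zeroes the actual frequencies, and the new restore_user_od_settings hook replays them. A hedged sketch of the resume-time consumer; the wrapper name is illustrative, not the exact amdgpu call chain:

	static int resume_user_od(struct smu_context *smu)
	{
		if (!smu->user_dpm_profile.user_od)
			return 0;	/* user never deviated from the defaults */
		return smu->ppt_funcs->restore_user_od_settings(smu);
	}
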
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 2cea688c604f..d7642d388bc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1702,8 +1702,9 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
+ int16_t od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ uint32_t power_limit;
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2143,10 +2144,15 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
if (!ret) {
- if (amdgpu_emu_mode == 1)
+ if (amdgpu_emu_mode == 1) {
msleep(50000);
- else
+ } else {
+ /* disable mmio access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(1000);
+ }
}
return ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index fe4c026280f0..60166919c5b5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -163,6 +163,7 @@ struct dw_hdmi_qp {
unsigned long ref_clk_rate;
struct regmap *regm;
+ int main_irq;
unsigned long tmds_char_rate;
};
@@ -1271,6 +1272,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
dw_hdmi_qp_init_hw(hdmi);
+ hdmi->main_irq = plat_data->main_irq;
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
dw_hdmi_qp_main_hardirq, NULL,
IRQF_SHARED, dev_name(dev), hdmi);
@@ -1331,9 +1333,16 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi)
+{
+ disable_irq(hdmi->main_irq);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_qp_suspend);
+
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_init_hw(hdmi);
+ enable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);
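
Caching main_irq lets the new suspend hook mask the interrupt while the controller is powered down, and resume re-initializes the hardware before unmasking it, so no IRQ fires against a dead block. A hedged sketch of how platform glue might wire these into dev PM ops; the my_hdmi names are hypothetical:

	struct my_hdmi {
		struct dw_hdmi_qp *hdmi;
	};

	static int my_hdmi_suspend(struct device *dev)
	{
		struct my_hdmi *priv = dev_get_drvdata(dev);

		dw_hdmi_qp_suspend(dev, priv->hdmi);
		return 0;
	}

	static int my_hdmi_resume(struct device *dev)
	{
		struct my_hdmi *priv = dev_get_drvdata(dev);

		dw_hdmi_qp_resume(dev, priv->hdmi);
		return 0;
	}

	static DEFINE_SIMPLE_DEV_PM_OPS(my_hdmi_pm_ops, my_hdmi_suspend, my_hdmi_resume);
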
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 10adac9397cf..5beea645035f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1162,8 +1162,18 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
new_state->self_refresh_active;
}
-static void
-encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the CRTC needs
+ * it, disables the whole bridge chain and then the encoder.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1229,9 +1239,18 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
-static void
-crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_disable - disable CRTCs
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all CRTCs in the current state and if the CRTC needs
+ * it, disables it.
+ */
+void
+drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1282,9 +1301,18 @@ crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
drm_crtc_vblank_put(crtc);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
-static void
-encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the CRTC needs
+ * it, post-disables all encoder bridges.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1335,15 +1363,16 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
- encoder_bridge_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
- crtc_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
- encoder_bridge_post_disable(dev, state);
+ drm_atomic_helper_commit_crtc_disable(dev, state);
}
/**
@@ -1446,8 +1475,17 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
-static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_set_mode - set the new mode
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the mode has
+ * changed, changes the mode of the CRTC, then calls down the bridge
+ * chain and changes the mode in all bridges as well.
+ */
+void
+drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1508,6 +1546,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
@@ -1531,12 +1570,21 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, state);
+ drm_atomic_helper_commit_crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
-static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_writebacks - issue writebacks
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors, checks if the new state requires
+ * a writeback job to be issued and in that case issues an atomic
+ * commit on each connector.
+ */
+void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1555,9 +1603,18 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
-static void
-encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors and if the CRTC needs it, pre-enables
+ * the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1588,9 +1645,18 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
-static void
-crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_enable - enables the CRTCs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over CRTCs in the new state, and if the CRTC needs
+ * it, enables it.
+ */
+void
+drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1619,9 +1685,18 @@ crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
-static void
-encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over all connectors in the new state, and if the CRTC needs
+ * it, enables the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1664,6 +1739,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
@@ -1682,11 +1758,11 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *state)
{
- encoder_bridge_pre_enable(dev, state);
+ drm_atomic_helper_commit_crtc_enable(dev, state);
- crtc_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
- encoder_bridge_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
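
Exporting the per-phase helpers lets a driver whose hardware needs a different phase ordering compose its own commit tail instead of open-coding the loops. A hedged sketch of such a driver-specific commit tail; the ordering shown is illustrative:

	static void my_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, 0);
		drm_atomic_helper_commit_crtc_enable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
		drm_atomic_helper_commit_writebacks(dev, state);
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);
	}
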
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4a7f72044ab8..4b47aa0dab35 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -366,6 +366,9 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
+ if (helper->info->state != FBINFO_STATE_RUNNING)
+ return;
+
drm_fb_helper_fb_dirty(helper);
}
@@ -732,6 +735,13 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
+ /*
+ * Cancel pending damage work. During GPU reset, VBlank
+ * interrupts are disabled and drm_fb_helper_fb_dirty()
+ * would otherwise wait for a vblank timeout.
+ */
+ cancel_work_sync(&fb_helper->damage_work);
+
console_lock();
} else {
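
The two fb_helper hunks close a race between fbdev suspend and the damage worker: suspend cancels the work synchronously, and the worker bails out early if it was already queued when the state flipped. A generic, hedged sketch of the pattern; the suspended flag stands in for info->state:

	struct my_helper {
		struct work_struct damage_work;
		bool suspended;
	};

	static void my_damage_worker(struct work_struct *work)
	{
		struct my_helper *h = container_of(work, struct my_helper, damage_work);

		if (READ_ONCE(h->suspended))	/* queued before suspend flipped state */
			return;
		/* ... flush accumulated damage to the pipeline ... */
	}

	static void my_helper_suspend(struct my_helper *h)
	{
		WRITE_ONCE(h->suspended, true);
		cancel_work_sync(&h->damage_work);	/* no worker runs past this point */
	}
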
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index efc79bbf3c73..e4df43427394 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -969,8 +969,10 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (args->handle == args->new_handle)
- return 0;
+ if (args->handle == args->new_handle) {
+ ret = 0;
+ goto out;
+ }
mutex_lock(&file_priv->prime.lock);
@@ -1002,6 +1004,8 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&file_priv->prime.lock);
+out:
+ drm_gem_object_put(obj);
return ret;
}
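
The early return in the no-op handle == new_handle case skipped the put that balances the lookup, leaking a GEM object reference on every such call. The discipline the fix restores, in a hedged sketch with a hypothetical do_work():

	struct drm_gem_object *obj = drm_gem_object_lookup(file_priv, handle);
	int ret;

	if (!obj)
		return -ENOENT;
	ret = do_work(obj);		/* every path below must reach the put */
	drm_gem_object_put(obj);	/* balances drm_gem_object_lookup() */
	return ret;
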
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 93b9cff89080..f13eb5f36e8a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -96,7 +96,8 @@ err_release:
/**
* drm_gem_shmem_init - Initialize an allocated object.
* @dev: DRM device
- * @obj: The allocated shmem GEM object.
+ * @shmem: The allocated shmem GEM object.
+ * @size: Buffer size in bytes
*
* Returns:
* 0 on success, or a negative error code on failure.
@@ -895,4 +896,4 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 8a06d296561d..0de47e83d84d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1602,14 +1602,48 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+/*
+ * drm_gpuvm_bo_destroy_not_in_lists() - final part of drm_gpuvm_bo cleanup
+ * @vm_bo: the &drm_gpuvm_bo to destroy
+ *
+ * It is illegal to call this method if the @vm_bo is present in the GEM's gpuva
+ * list, the extobj list, or the evicted list.
+ *
+ * Note that this drops a reference on the GEM object, which may destroy the GEM
+ * object if the refcount reaches zero. It's illegal for this to happen while the
+ * caller holds the GEM's gpuva mutex, since destroying the object would free the mutex.
+ */
+static void
+drm_gpuvm_bo_destroy_not_in_lists(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gem_object *obj = vm_bo->obj;
+
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+}
+
+static void
+drm_gpuvm_bo_destroy_not_in_lists_kref(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
+}
+
static void
drm_gpuvm_bo_destroy(struct kref *kref)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
struct drm_gpuvm *gpuvm = vm_bo->vm;
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
- struct drm_gem_object *obj = vm_bo->obj;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
if (!lock)
@@ -1618,16 +1652,10 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(gpuvm, obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, vm_bo->obj);
list_del(&vm_bo->list.entry.gem);
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
/**
@@ -1745,9 +1773,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
void
drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
{
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gpuvm_bo *vm_bo;
- struct drm_gem_object *obj;
struct llist_node *bo_defer;
bo_defer = llist_del_all(&gpuvm->bo_defer);
@@ -1766,14 +1792,7 @@ drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
while (bo_defer) {
vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
bo_defer = bo_defer->next;
- obj = vm_bo->obj;
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
@@ -1861,6 +1880,9 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
* count is decreased. If not found @__vm_bo is returned without further
* increase of the reference count.
*
+ * The provided @__vm_bo must not already be in the gpuva, evict, or extobj
+ * lists prior to calling this method.
+ *
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
*
* Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
@@ -1873,14 +1895,19 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
struct drm_gem_object *obj = __vm_bo->obj;
struct drm_gpuvm_bo *vm_bo;
+ drm_WARN_ON(gpuvm->drm, !drm_gpuvm_immediate_mode(gpuvm));
+
+ mutex_lock(&obj->gpuva.lock);
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
if (vm_bo) {
- drm_gpuvm_bo_put(__vm_bo);
+ mutex_unlock(&obj->gpuva.lock);
+ kref_put(&__vm_bo->kref, drm_gpuvm_bo_destroy_not_in_lists_kref);
return vm_bo;
}
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
+ mutex_unlock(&obj->gpuva.lock);
return __vm_bo;
}
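
The refactor splits destruction in two: list teardown, which needs the gpuva lock or resv protection, and the final free. The lock-free tail can then be shared by the normal destroy path, deferred cleanup, and the prealloc-obtain loser, which by contract was never on any list. The kref shape, as a generic hedged sketch:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kref kref;
		struct list_head entry;		/* may or may not be linked */
	};

	static void my_obj_free(struct my_obj *o)	/* shared, lock-free tail */
	{
		kfree(o);
	}

	static void my_obj_release(struct kref *k)	/* default: unlink first */
	{
		struct my_obj *o = container_of(k, struct my_obj, kref);

		list_del(&o->entry);
		my_obj_free(o);
	}

	static void my_obj_release_unlinked(struct kref *k)	/* known off-list */
	{
		my_obj_free(container_of(k, struct my_obj, kref));
	}
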
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 37d7cfbbb3e8..06c1bd8fc4d1 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -3,6 +3,7 @@
* Copyright © 2024-2025 Intel Corporation
*/
+#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages,
+ devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
+ devmem_allocation->pre_migrate_fence = NULL;
+
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
* @ops: Pointer to the operations structure for GPU SVM device memory
* @dpagemap: The struct drm_pagemap we're allocating from.
* @size: Size of device memory allocation
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;
@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
+ devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
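On the driver side, copy_to_devmem() now receives the fence. A hedged sketch of honoring it — the function name and the abridged parameter types are illustrative; dma_fence_wait() is the only API assumed here:

static int example_copy_to_devmem(struct page **pages, void *pagemap_addr,
				  unsigned long npages,
				  struct dma_fence *pre_migrate_fence)
{
	/* Either pipeline the copy behind the fence on the device queue
	 * or, as the simplest correct option, wait on the CPU first.
	 * The fence may be NULL, per the kernel-doc above. */
	if (pre_migrate_fence) {
		long ret = dma_fence_wait(pre_migrate_fence, true);

		if (ret < 0)
			return ret;
	}

	/* ... issue the actual copy of npages pages into device memory ... */
	return 0;
}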
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 01813e11e6c6..8e76ac8ee4e2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1692,7 +1692,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
- mod_delayed_work(system_wq, &hdata->hotplug_work,
+ mod_delayed_work(system_percpu_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 76d77a736d84..4b77be94348d 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -457,27 +457,20 @@ int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
- const struct drm_format_info *format = fb->format;
+ const struct drm_format_info *format;
struct drm_connector *connector;
unsigned int i, num_properties;
struct gud_state_req *req;
int idx, ret;
size_t len;
- if (drm_WARN_ON_ONCE(plane->dev, !fb))
- return -EINVAL;
-
- if (drm_WARN_ON_ONCE(plane->dev, !crtc))
- return -EINVAL;
-
- crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
- mode = &crtc_state->mode;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@@ -492,6 +485,9 @@ int gud_plane_atomic_check(struct drm_plane *plane,
if (old_plane_state->rotation != new_plane_state->rotation)
crtc_state->mode_changed = true;
+ mode = &crtc_state->mode;
+ format = fb->format;
+
if (old_fb && old_fb->format != format)
crtc_state->mode_changed = true;
@@ -598,7 +594,7 @@ void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
int ret, idx;
- if (crtc->state->mode_changed || !crtc->state->enable) {
+ if (!crtc || crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
mutex_lock(&gdrm->damage_lock);
if (gdrm->fb) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b057c2fa03a4..d49e96f9be51 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ return err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
- goto err;
+ return err;
}
err = eb_add_vma(eb, &current_batch, i, vma);
@@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
- if (err) {
- if (i + 1 < eb->buffer_count) {
- /*
- * Execbuffer code expects last vma entry to be NULL,
- * since we already initialized this entry,
- * set the next value to NULL or we mess up
- * cleanup handling.
- */
- eb->vma[i + 1].vma = NULL;
- }
-
+ if (err)
return err;
- }
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
eb->args->flags |= __EXEC_USERPTR_USED;
@@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
return 0;
-
-err:
- eb->vma[i].vma = NULL;
- return err;
}
static int eb_lock_vmas(struct i915_execbuffer *eb)
@@ -3375,7 +3360,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec;
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
+ memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
+
eb.batch_pool = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
@@ -3584,7 +3570,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate extra slots for use by the command parser */
+ /*
+ * Allocate extra slots for use by the command parser.
+ *
+ * Note that this allocation handles two different arrays (the
+ * exec2_list array, and the eventual eb.vma array introduced in
+	 * i915_gem_do_execbuffer()) that reside in virtually contiguous
+	 * memory. Also note that the allocation intentionally doesn't fill the
+	 * area with zeros, because the exec2_list part doesn't need to be
+	 * zeroed, as it's immediately overwritten by user data a few lines below.
+ * However, the eb.vma part is explicitly zeroed later in
+ * i915_gem_do_execbuffer().
+ */
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
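Spelling out the layout the comment describes, assuming eb_element_size() is sizeof(*exec2_list) + sizeof(struct eb_vma) as its use here suggests (an illustrative diagram, not code from this series):

/*
 * One kvmalloc_array(count + 2, eb_element_size(), ...) allocation:
 *
 *   exec2_list[0 .. count + 1]   <- overwritten by copy_from_user(),
 *                                   so it needs no zeroing
 *   eb.vma[0 .. count + 1]       <- starts at (exec + buffer_count + 1),
 *                                   zeroed by the memset() added above
 */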
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 7582ef34bf3f..303d8d9b7775 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -686,7 +686,7 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
}
/* This list includes registers that are useful in debugging GuC hangs. */
-const struct {
+static const struct {
u32 start;
u32 count;
} guc_hw_reg_state[] = {
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index a66cf082af24..c07c9a915190 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj)
drm_gem_shmem_object_free(obj);
}
+static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
+{
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
+
+ if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
.free = pvr_gem_object_free,
.print_info = drm_gem_shmem_object_print_info,
+ .export = pvr_gem_export,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
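DRM core calls the new .export hook from the PRIME export path, so a userspace export attempt on a firmware-protected BO should now fail. A hedged libdrm sketch — drm_fd and protected_bo_handle are hypothetical:

int prime_fd;
int ret;

/* protected_bo_handle: a BO created with DRM_PVR_BO_PM_FW_PROTECT. */
ret = drmPrimeHandleToFD(drm_fd, protected_bo_handle, DRM_CLOEXEC,
			 &prime_fd);
/* Expect failure with EPERM now that pvr_gem_export() refuses it. */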
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0e2bcd5f67b7..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,12 +1002,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
- /*
- * set flag to request the DSI host bridge be pre-enabled before device bridge
- * in the chain, so the DSI host is ready when the device bridge is pre-enabled
- */
- dsi->next_bridge->pre_enable_prev_first = true;
-
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 29107b362346..ac9a95aab2fb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1376,7 +1376,6 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
REG_A6XX_UCHE_MODE_CNTL,
REG_A6XX_RB_NC_MODE_CNTL,
REG_A6XX_RB_CMP_DBG_ECO_CNTL,
- REG_A7XX_GRAS_NC_MODE_CNTL,
REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
REG_A6XX_UCHE_GBIF_GX_CONFIG,
REG_A6XX_UCHE_CLIENT_PF,
@@ -1392,6 +1391,7 @@ static const u32 a750_ifpc_reglist_regs[] = {
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+ REG_A6XX_RBBM_PERFCTR_CNTL,
REG_A6XX_TPL1_NC_MODE_CNTL,
REG_A6XX_SP_NC_MODE_CNTL,
REG_A6XX_CP_DBG_ECO_CNTL,
@@ -1448,6 +1448,12 @@ static const u32 a750_ifpc_reglist_regs[] = {
DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
+static const struct adreno_reglist_pipe a7xx_dyn_pwrup_reglist_regs[] = {
+ { REG_A7XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+};
+
+DECLARE_ADRENO_REGLIST_PIPE_LIST(a7xx_dyn_pwrup_reglist);
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1491,6 +1497,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a730_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
},
@@ -1513,6 +1520,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
@@ -1547,6 +1555,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7050001,
@@ -1589,6 +1598,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7090100,
@@ -1623,6 +1633,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x70f0000,
.gmu_cgc_mode = 0x00020222,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0200a7e71cdf..2129d230a92b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -849,9 +849,16 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
- if (adreno_is_a7xx(adreno_gpu))
- gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
- FIELD_PREP(GENMASK(8, 5), hbb_lo));
+ if (adreno_is_a7xx(adreno_gpu)) {
+ for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
+ gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
+ FIELD_PREP(GENMASK(8, 5), hbb_lo));
+ }
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
+ }
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
min_acc_len_64b << 23 | hbb_lo << 21);
@@ -865,23 +872,27 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
const struct adreno_reglist_list *reglist;
+ const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
void *ptr = a6xx_gpu->pwrup_reglist_ptr;
struct cpu_gpu_lock *lock = ptr;
u32 *dest = (u32 *)&lock->regs[0];
+ u32 dyn_pwrup_reglist_count = 0;
int i;
lock->gpu_req = lock->cpu_req = lock->turn = 0;
reglist = adreno_gpu->info->a6xx->ifpc_reglist;
- lock->ifpc_list_len = reglist->count;
+ if (reglist) {
+ lock->ifpc_list_len = reglist->count;
- /*
- * For each entry in each of the lists, write the offset and the current
- * register value into the GPU buffer
- */
- for (i = 0; i < reglist->count; i++) {
- *dest++ = reglist->regs[i];
- *dest++ = gpu_read(gpu, reglist->regs[i]);
+ /*
+ * For each entry in each of the lists, write the offset and the current
+ * register value into the GPU buffer
+ */
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
}
reglist = adreno_gpu->info->a6xx->pwrup_reglist;
@@ -907,7 +918,24 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
* (<aperture, shifted 12 bits> <address> <data>), and the length is
	 * stored as the number of triplets in dynamic_list_len.
*/
- lock->dynamic_list_len = 0;
+ dyn_pwrup_reglist = adreno_gpu->info->a6xx->dyn_pwrup_reglist;
+ if (dyn_pwrup_reglist) {
+ for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
+ for (i = 0; i < dyn_pwrup_reglist->count; i++) {
+ if ((dyn_pwrup_reglist->regs[i].pipe & BIT(pipe_id)) == 0)
+ continue;
+ *dest++ = A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id);
+ *dest++ = dyn_pwrup_reglist->regs[i].offset;
+ *dest++ = gpu_read(gpu, dyn_pwrup_reglist->regs[i].offset);
+ dyn_pwrup_reglist_count++;
+ }
+ }
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
+ }
+ lock->dynamic_list_len = dyn_pwrup_reglist_count;
}
static int a7xx_preempt_start(struct msm_gpu *gpu)
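Both hunks above use the same CP aperture dance; factored into a helper purely for illustration (this series does not add such a helper):

static void a7xx_write_pipe_reg(struct msm_gpu *gpu, u32 pipe_id,
				u32 offset, u32 value)
{
	/* Point the CP host aperture at the target pipe (BR or BV). */
	gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
		  A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));

	/* This access now lands in that pipe's register context. */
	gpu_write(gpu, offset, value);

	/* Callers restore PIPE_NONE once done, as both hunks above do
	 * after their loops. */
}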
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6820216ec5fc..4eaa04711246 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -45,6 +45,7 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
+ const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
const struct adreno_reglist_list *ifpc_reglist;
const struct adreno_reglist *gbif_cx;
const struct adreno_reglist_pipe *nonctxt_reglist;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index afc5f4aa3b17..747a22afad9f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -454,11 +454,11 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
gpu->vm, &a6xx_gpu->preempt_postamble_bo,
&a6xx_gpu->preempt_postamble_iova);
- preempt_prepare_postamble(a6xx_gpu);
-
if (IS_ERR(a6xx_gpu->preempt_postamble_ptr))
goto fail;
+ preempt_prepare_postamble(a6xx_gpu);
+
timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0);
return;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0f8d3de97636..1d0145f8b3ec 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -188,6 +188,19 @@ static const struct adreno_reglist_list name = { \
.count = ARRAY_SIZE(name ## _regs), \
};
+struct adreno_reglist_pipe_list {
+	/** @regs: List of registers */
+ const struct adreno_reglist_pipe *regs;
+	/** @count: Number of registers in the list */
+ u32 count;
+};
+
+#define DECLARE_ADRENO_REGLIST_PIPE_LIST(name) \
+static const struct adreno_reglist_pipe_list name = { \
+ .regs = name ## _regs, \
+ .count = ARRAY_SIZE(name ## _regs), \
+};
+
struct adreno_gpu {
struct msm_gpu base;
const struct adreno_info *info;
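Usage of the new macro, matching the a6xx_catalog.c hunk earlier in this series — the name ## _regs array must be declared first:

static const struct adreno_reglist_pipe a7xx_dyn_pwrup_reglist_regs[] = {
	{ REG_A7XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
};

DECLARE_ADRENO_REGLIST_PIPE_LIST(a7xx_dyn_pwrup_reglist);

/* ...which expands to:
 *
 * static const struct adreno_reglist_pipe_list a7xx_dyn_pwrup_reglist = {
 *	.regs = a7xx_dyn_pwrup_reglist_regs,
 *	.count = ARRAY_SIZE(a7xx_dyn_pwrup_reglist_regs),
 * };
 */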
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index c39f1908ea65..2d06c950e814 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -200,7 +200,7 @@ static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
struct dpu_crtc_state *crtc_state)
{
struct dpu_crtc_mixer *m;
- u32 crcs[CRTC_QUAD_MIXERS];
+ u32 crcs[CRTC_DUAL_MIXERS];
int rc = 0;
int i;
@@ -1328,7 +1328,6 @@ static struct msm_display_topology dpu_crtc_get_topology(
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct msm_display_topology topology = {0};
struct drm_encoder *drm_enc;
- u32 num_rt_intf;
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
@@ -1342,14 +1341,11 @@ static struct msm_display_topology dpu_crtc_get_topology(
* Dual display
* 2 LM, 2 INTF ( Split display using 2 interfaces)
*
- * If DSC is enabled, try to use 4:4:2 topology if there is enough
- * resource. Otherwise, use 2:2:2 topology.
- *
* Single display
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * If DSC is enabled, use 2:2:1 topology
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
*
* Add dspps to the reservation requirements if ctm is requested
*
@@ -1361,23 +1357,14 @@ static struct msm_display_topology dpu_crtc_get_topology(
* (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
*/
- num_rt_intf = topology.num_intf;
- if (topology.cwb_enabled)
- num_rt_intf--;
-
- if (topology.num_dsc) {
- if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2)
- topology.num_dsc = num_rt_intf * 2;
- else
- topology.num_dsc = num_rt_intf;
- topology.num_lm = topology.num_dsc;
- } else if (num_rt_intf == 2) {
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
topology.num_lm = 2;
- } else if (dpu_kms->catalog->caps->has_3d_merge) {
+ else if (dpu_kms->catalog->caps->has_3d_merge)
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- } else {
+ else
topology.num_lm = 1;
- }
if (crtc_state->ctm)
topology.num_dspp = topology.num_lm;
@@ -1620,17 +1607,6 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
return 0;
}
-/**
- * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline
- * @state: Pointer to drm crtc state object
- */
-unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
-
- return cstate->num_mixers;
-}
-
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
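The simplified layer-mixer selection above reduces to a small pure function; a sketch for illustration only, using the same fields the hunk touches:

static u32 example_num_lm(const struct msm_display_topology *topology,
			  const struct drm_display_mode *mode,
			  const struct dpu_mdss_cfg *catalog)
{
	if (topology->num_intf == 2 && !topology->cwb_enabled)
		return 2;	/* dual-interface split display */
	if (topology->num_dsc == 2)
		return 2;	/* 2:2:1 DSC topology */
	if (catalog->caps->has_3d_merge)
		return mode->hdisplay > MAX_HDISPLAY_SPLIT ? 2 : 1;
	return 1;
}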
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 455073c7025b..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -210,7 +210,7 @@ struct dpu_crtc_state {
bool bw_control;
bool bw_split_vote;
- struct drm_rect lm_bounds[CRTC_QUAD_MIXERS];
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
@@ -218,10 +218,10 @@ struct dpu_crtc_state {
/* HW Resources reserved for the crtc */
u32 num_mixers;
- struct dpu_crtc_mixer mixers[CRTC_QUAD_MIXERS];
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
u32 num_ctls;
- struct dpu_hw_ctl *hw_ctls[CRTC_QUAD_MIXERS];
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
enum dpu_crtc_crc_source crc_source;
int crc_frame_skip_count;
@@ -267,6 +267,4 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event);
-unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state);
-
#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d1cfe81a3373..9f3957f24c6a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -55,7 +55,7 @@
#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
-#define MAX_CHANNELS_PER_ENC 4
+#define MAX_CHANNELS_PER_ENC 2
#define MAX_CWB_PER_ENC 2
#define IDLE_SHORT_TIMEOUT 1
@@ -661,6 +661,7 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_framebuffer *fb;
@@ -674,12 +675,22 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
dsc = dpu_encoder_get_dsc_config(drm_enc);
- /*
- * Set DSC number as 1 to mark the enabled status, will be adjusted
- * in dpu_crtc_get_topology()
- */
- if (dsc)
- topology->num_dsc = 1;
+	/* We only support the 2-DSC mode (with 2 LM and 1 INTF) */
+ if (dsc) {
+ /*
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+		 * This is power-optimal and can drive screens up to and
+		 * including 4k.
+ */
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
+ }
connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
if (!connector)
@@ -2169,8 +2180,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
int i, num_lm;
struct dpu_global_state *global_state;
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
/* reset all mixers for this encoder */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 09395d7910ac..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -302,7 +302,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
/* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- (dpu_cstate->num_mixers != 1) &&
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
!dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 46f348972a97..6d28f2281c76 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -247,14 +247,12 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
if (hw_cdm)
intf_cfg.cdm = hw_cdm->idx;
- if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
- phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
- mode_3d);
+ if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
+ hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
/* setup which pp blk will connect to this wb */
- if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
- phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
- phys_enc->hw_pp->idx);
+ if (hw_pp && hw_wb->ops.bind_pingpong_blk)
+ hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
} else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 336757103b5a..4964e70610d1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -24,7 +24,7 @@
#define DPU_MAX_IMG_WIDTH 0x3fff
#define DPU_MAX_IMG_HEIGHT 0x3fff
-#define CRTC_QUAD_MIXERS 4
+#define CRTC_DUAL_MIXERS 2
#define MAX_XIN_COUNT 16
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
index 6bb3476a05f8..75e6dae0fcd9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -89,13 +89,13 @@ enum dpu_hw_cdwn_op_mode_method_h_v {
*/
struct dpu_hw_cdm_ops {
/**
- * Enable the CDM module
+ * @enable: Enable the CDM module
* @cdm Pointer to chroma down context
*/
int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg);
/**
- * Enable/disable the connection with pingpong
+ * @bind_pingpong_blk: Enable/disable the connection with pingpong
* @cdm Pointer to chroma down context
* @pp pingpong block id.
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 15931b22ec94..e535bf013825 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -12,9 +12,9 @@
#include "dpu_hw_sspp.h"
/**
- * dpu_ctl_mode_sel: Interface mode selection
- * DPU_CTL_MODE_SEL_VID: Video mode interface
- * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ * enum dpu_ctl_mode_sel: Interface mode selection
+ * @DPU_CTL_MODE_SEL_VID: Video mode interface
+ * @DPU_CTL_MODE_SEL_CMD: Command mode interface
*/
enum dpu_ctl_mode_sel {
DPU_CTL_MODE_SEL_VID = 0,
@@ -37,6 +37,7 @@ struct dpu_hw_stage_cfg {
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
* @intf_master: Master interface id in the dual pipe topology
+ * @wb: Writeback mode
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -64,21 +65,21 @@ struct dpu_hw_intf_cfg {
*/
struct dpu_hw_ctl_ops {
/**
- * kickoff hw operation for Sw controlled interfaces
+ * @trigger_start: kickoff hw operation for Sw controlled interfaces
* DSI cmd mode and WB interface are SW controlled
* @ctx : ctl path ctx pointer
*/
void (*trigger_start)(struct dpu_hw_ctl *ctx);
/**
- * check if the ctl is started
+ * @is_started: check if the ctl is started
* @ctx : ctl path ctx pointer
* @Return: true if started, false if stopped
*/
bool (*is_started)(struct dpu_hw_ctl *ctx);
/**
- * kickoff prepare is in progress hw operation for sw
+ * @trigger_pending: kickoff prepare is in progress hw operation for sw
* controlled interfaces: DSI cmd mode and WB interface
* are SW controlled
* @ctx : ctl path ctx pointer
@@ -86,7 +87,7 @@ struct dpu_hw_ctl_ops {
void (*trigger_pending)(struct dpu_hw_ctl *ctx);
/**
- * Clear the value of the cached pending_flush_mask
+ * @clear_pending_flush: Clear the value of the cached pending_flush_mask
* No effect on hardware.
* Required to be implemented.
* @ctx : ctl path ctx pointer
@@ -94,14 +95,15 @@ struct dpu_hw_ctl_ops {
void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
/**
- * Query the value of the cached pending_flush_mask
+ * @get_pending_flush: Query the value of the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush: OR in the given flushbits to the cached
+ * pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
@@ -110,7 +112,8 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
- * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * @update_pending_flush_wb: OR in the given flushbits to the
+ * cached pending_(wb_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : writeback block index
@@ -119,7 +122,8 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
- * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * @update_pending_flush_cwb: OR in the given flushbits to the
+ * cached pending_(cwb_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : concurrent writeback block index
@@ -128,7 +132,8 @@ struct dpu_hw_ctl_ops {
enum dpu_cwb blk);
/**
- * OR in the given flushbits to the cached pending_(intf_)flush_mask
+ * @update_pending_flush_intf: OR in the given flushbits to the
+ * cached pending_(intf_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -137,7 +142,8 @@ struct dpu_hw_ctl_ops {
enum dpu_intf blk);
/**
- * OR in the given flushbits to the cached pending_(periph_)flush_mask
+ * @update_pending_flush_periph: OR in the given flushbits to the
+ * cached pending_(periph_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -146,7 +152,8 @@ struct dpu_hw_ctl_ops {
enum dpu_intf blk);
/**
- * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
+ * @update_pending_flush_merge_3d: OR in the given flushbits to the
+ * cached pending_(merge_3d_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -155,7 +162,8 @@ struct dpu_hw_ctl_ops {
enum dpu_merge_3d blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_sspp: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : SSPP block index
@@ -164,7 +172,8 @@ struct dpu_hw_ctl_ops {
enum dpu_sspp blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_mixer: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : LM block index
@@ -173,7 +182,8 @@ struct dpu_hw_ctl_ops {
enum dpu_lm blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_dspp: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : DSPP block index
@@ -183,7 +193,8 @@ struct dpu_hw_ctl_ops {
enum dpu_dspp blk, u32 dspp_sub_blk);
/**
- * OR in the given flushbits to the cached pending_(dsc_)flush_mask
+ * @update_pending_flush_dsc: OR in the given flushbits to the
+ * cached pending_(dsc_)flush_mask.
* No effect on hardware
* @ctx: ctl path ctx pointer
* @blk: interface block index
@@ -192,7 +203,8 @@ struct dpu_hw_ctl_ops {
enum dpu_dsc blk);
/**
- * OR in the given flushbits to the cached pending_(cdm_)flush_mask
+ * @update_pending_flush_cdm: OR in the given flushbits to the
+ * cached pending_(cdm_)flush_mask.
* No effect on hardware
* @ctx: ctl path ctx pointer
* @cdm_num: idx of cdm to be flushed
@@ -200,20 +212,20 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num);
/**
- * Write the value of the pending_flush_mask to hardware
+ * @trigger_flush: Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
void (*trigger_flush)(struct dpu_hw_ctl *ctx);
/**
- * Read the value of the flush register
+ * @get_flush_register: Read the value of the flush register
* @ctx : ctl path ctx pointer
* @Return: value of the ctl flush register.
*/
u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
/**
- * Setup ctl_path interface config
+ * @setup_intf_cfg: Setup ctl_path interface config
* @ctx
* @cfg : interface config structure pointer
*/
@@ -221,17 +233,20 @@ struct dpu_hw_ctl_ops {
struct dpu_hw_intf_cfg *cfg);
/**
- * reset ctl_path interface config
+ * @reset_intf_cfg: reset ctl_path interface config
* @ctx : ctl path ctx pointer
* @cfg : interface config structure pointer
*/
void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
+ /**
+ * @reset: reset function for this ctl type
+ */
int (*reset)(struct dpu_hw_ctl *c);
- /*
- * wait_reset_status - checks ctl reset status
+ /**
+ * @wait_reset_status: checks ctl reset status
* @ctx : ctl path ctx pointer
*
* This function checks the ctl reset status bit.
@@ -242,13 +257,13 @@ struct dpu_hw_ctl_ops {
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
/**
- * Set all blend stages to disabled
+ * @clear_all_blendstages: Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
/**
- * Configure layer mixer to pipe configuration
+ * @setup_blendstage: Configure layer mixer to pipe configuration
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
@@ -256,11 +271,16 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+ /**
+	 * @set_active_fetch_pipes: Set active fetch pipes attached to this CTL
+	 * @ctx: ctl path ctx pointer
+	 * @fetch_active: bitmap of enum dpu_sspp
+ */
void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
/**
- * Set active pipes attached to this CTL
+ * @set_active_pipes: Set active pipes attached to this CTL
* @ctx: ctl path ctx pointer
* @active_pipes: bitmap of enum dpu_sspp
*/
@@ -268,13 +288,12 @@ struct dpu_hw_ctl_ops {
unsigned long *active_pipes);
/**
- * Set active layer mixers attached to this CTL
+ * @set_active_lms: Set active layer mixers attached to this CTL
* @ctx: ctl path ctx pointer
* @active_lms: bitmap of enum dpu_lm
*/
void (*set_active_lms)(struct dpu_hw_ctl *ctx,
unsigned long *active_lms);
-
};
/**
@@ -289,6 +308,9 @@ struct dpu_hw_ctl_ops {
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @pending_cwb_flush_mask: pending CWB flush
+ * @pending_periph_flush_mask: pending PERIPH flush
+ * @pending_merge_3d_flush_mask: pending MERGE 3D flush
+ * @pending_dspp_flush_mask: pending DSPP flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @mdss_ver: MDSS revision information
@@ -320,7 +342,7 @@ struct dpu_hw_ctl {
};
/**
- * dpu_hw_ctl - convert base object dpu_hw_base to container
+ * to_dpu_hw_ctl - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
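The flush ops documented above are two-phase: mask accumulation, then a single write. A minimal usage sketch; the wrapper is illustrative, the ops and fields are the ones this header documents:

static void example_flush_mixer(struct dpu_hw_ctl *ctl, enum dpu_lm lm)
{
	/* Phase 1: OR the LM's bit into the cached pending_flush_mask;
	 * no hardware access happens here. */
	if (ctl->ops.update_pending_flush_mixer)
		ctl->ops.update_pending_flush_mixer(ctl, lm);

	/* Phase 2: write the accumulated mask to the CTL flush register. */
	if (ctl->ops.trigger_flush)
		ctl->ops.trigger_flush(ctl);
}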
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
index 96b6edf6b2bb..ed7bfcee7f1c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
@@ -28,7 +28,6 @@ struct dpu_hw_cwb_setup_cfg {
};
/**
- *
* struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
* @config_cwb: configure CWB mux
*/
@@ -54,7 +53,7 @@ struct dpu_hw_cwb {
};
/**
- * dpu_hw_cwb - convert base object dpu_hw_base to container
+ * to_dpu_hw_cwb - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index cc7cc6f6f7cd..39d93b9df051 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -21,13 +21,13 @@ struct dpu_hw_dsc;
*/
struct dpu_hw_dsc_ops {
/**
- * dsc_disable - disable dsc
+ * @dsc_disable: disable dsc
* @hw_dsc: Pointer to dsc context
*/
void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
/**
- * dsc_config - configures dsc encoder
+ * @dsc_config: configures dsc encoder
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
* @mode: dsc topology mode to be set
@@ -39,13 +39,17 @@ struct dpu_hw_dsc_ops {
u32 initial_lines);
/**
- * dsc_config_thresh - programs panel thresholds
+ * @dsc_config_thresh: programs panel thresholds
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc);
+ /**
+ * @dsc_bind_pingpong_blk: binds pixel output from a DSC block
+ * to a pingpong block
+ */
void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
enum dpu_pingpong pp);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index 45c26cd49fa3..722b0f482e9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -22,7 +22,7 @@ struct dpu_hw_pcc_coeff {
};
/**
- * struct dpu_hw_pcc - pcc feature structure
+ * struct dpu_hw_pcc_cfg - pcc feature structure
* @r: red coefficients.
* @g: green coefficients.
* @b: blue coefficients.
@@ -40,7 +40,7 @@ struct dpu_hw_pcc_cfg {
*/
struct dpu_hw_dspp_ops {
/**
- * setup_pcc - setup dspp pcc
+	 * @setup_pcc: setup dspp pcc
* @ctx: Pointer to dspp context
* @cfg: Pointer to configuration
*/
@@ -69,7 +69,7 @@ struct dpu_hw_dspp {
};
/**
- * dpu_hw_dspp - convert base object dpu_hw_base to container
+ * to_dpu_hw_dspp - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index f31067a9aaf1..5a19cd74fa94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -57,11 +57,11 @@ struct dpu_hw_intf_cmd_mode_cfg {
/**
* struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
* Assumption is these functions will be called after clocks are enabled
- * @ setup_timing_gen : programs the timing engine
- * @ setup_prog_fetch : enables/disables the programmable fetch logic
- * @ enable_timing: enable/disable timing engine
- * @ get_status: returns if timing engine is enabled or not
- * @ get_line_count: reads current vertical line counter
+ * @setup_timing_gen : programs the timing engine
+ * @setup_prg_fetch : enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @get_line_count: reads current vertical line counter
* @bind_pingpong_blk: enable/disable the connection with pingpong which will
* feed pixels to this interface
* @setup_misr: enable/disable MISR
@@ -70,12 +70,9 @@ struct dpu_hw_intf_cmd_mode_cfg {
* pointer and programs the tear check configuration
* @disable_tearcheck: Disables tearcheck block
* @connect_external_te: Read, modify, write to either set or clear listening to external TE
- * Return: 1 if TE was originally connected, 0 if not, or -ERROR
- * @get_vsync_info: Provides the programmed and current line_count
- * @setup_autorefresh: Configure and enable the autorefresh config
- * @get_autorefresh: Retrieve autorefresh config from hardware
- * Return: 0 on success, -ETIMEDOUT on timeout
+ * Returns 1 if TE was originally connected, 0 if not, or -ERROR
* @vsync_sel: Select vsync signal for tear-effect configuration
+ * @disable_autorefresh: Disable autorefresh if enabled
* @program_intf_cmd_cfg: Program the DPU to interface datapath for command mode
*/
struct dpu_hw_intf_ops {
@@ -109,9 +106,6 @@ struct dpu_hw_intf_ops {
void (*vsync_sel)(struct dpu_hw_intf *intf, enum dpu_vsync_source vsync_source);
- /**
- * Disable autorefresh if enabled
- */
void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
void (*program_intf_cmd_cfg)(struct dpu_hw_intf *intf,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 1b9ecd082d7f..ecbb77711d83 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -25,39 +25,38 @@ struct dpu_hw_color3_cfg {
};
/**
- *
* struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_lm_ops {
- /*
- * Sets up mixer output width and height
+ /**
+ * @setup_mixer_out: Sets up mixer output width and height
* and border color if enabled
*/
void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
struct dpu_hw_mixer_cfg *cfg);
- /*
- * Alpha blending configuration
+ /**
+ * @setup_blend_config: Alpha blending configuration
* for the specified stage
*/
void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
- /*
- * Alpha color component selection from either fg or bg
+ /**
+ * @setup_alpha_out: Alpha color component selection from either fg or bg
*/
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
- * Clear layer mixer to pipe configuration
+ * @clear_all_blendstages: Clear layer mixer to pipe configuration
* @ctx : mixer ctx pointer
* Returns: 0 on success or -error
*/
int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
/**
- * Configure layer mixer to pipe configuration
+ * @setup_blendstage: Configure layer mixer to pipe configuration
* @ctx : mixer ctx pointer
* @lm : layer mixer enumeration
* @stage_cfg : blend stage configuration
@@ -67,19 +66,19 @@ struct dpu_hw_lm_ops {
struct dpu_hw_stage_cfg *stage_cfg);
/**
- * setup_border_color : enable/disable border color
+ * @setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
/**
- * setup_misr: Enable/disable MISR
+ * @setup_misr: Enable/disable MISR
*/
void (*setup_misr)(struct dpu_hw_mixer *ctx);
/**
- * collect_misr: Read MISR signature
+ * @collect_misr: Read MISR signature
*/
int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 31451241f083..046b683d4c66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -34,7 +34,7 @@
#define DPU_MAX_PLANES 4
#endif
-#define STAGES_PER_PLANE 2
+#define STAGES_PER_PLANE 1
#define PIPES_PER_STAGE 2
#define PIPES_PER_PLANE (PIPES_PER_STAGE * STAGES_PER_PLANE)
#ifndef DPU_MAX_DE_CURVES
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index 6833c0207523..b57f88187148 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -12,7 +12,6 @@
struct dpu_hw_merge_3d;
/**
- *
* struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_3d_mode : enable 3D merge
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index dd99e1c21a1e..effd012d864a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -34,7 +34,6 @@ struct dpu_hw_dither_cfg {
};
/**
- *
* struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @enable_tearcheck: program and enable tear check block
@@ -44,51 +43,52 @@ struct dpu_hw_dither_cfg {
*/
struct dpu_hw_pingpong_ops {
/**
- * enables vysnc generation and sets up init value of
+	 * @enable_tearcheck: enables vsync generation and sets up init value of
	 * read pointer and programs the tear check configuration
*/
int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *cfg);
/**
- * disables tear check block
+ * @disable_tearcheck: disables tear check block
*/
int (*disable_tearcheck)(struct dpu_hw_pingpong *pp);
/**
- * read, modify, write to either set or clear listening to external TE
+ * @connect_external_te: read, modify, write to either set or clear
+ * listening to external TE
* @Return: 1 if TE was originally connected, 0 if not, or -ERROR
*/
int (*connect_external_te)(struct dpu_hw_pingpong *pp,
bool enable_external_te);
/**
- * Obtain current vertical line counter
+ * @get_line_count: Obtain current vertical line counter
*/
u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
/**
- * Disable autorefresh if enabled
+ * @disable_autorefresh: Disable autorefresh if enabled
*/
void (*disable_autorefresh)(struct dpu_hw_pingpong *pp, uint32_t encoder_id, u16 vdisplay);
/**
- * Setup dither matix for pingpong block
+	 * @setup_dither: Setup dither matrix for pingpong block
*/
void (*setup_dither)(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg);
/**
- * Enable DSC
+ * @enable_dsc: Enable DSC
*/
int (*enable_dsc)(struct dpu_hw_pingpong *pp);
/**
- * Disable DSC
+ * @disable_dsc: Disable DSC
*/
void (*disable_dsc)(struct dpu_hw_pingpong *pp);
/**
- * Setup DSC
+ * @setup_dsc: Setup DSC
*/
int (*setup_dsc)(struct dpu_hw_pingpong *pp);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index bdac5c04bf79..3822094f85bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -14,7 +14,7 @@ struct dpu_hw_sspp;
#define DPU_SSPP_MAX_PITCH_SIZE 0xffff
-/**
+/*
* Flags
*/
#define DPU_SSPP_FLIP_LR BIT(0)
@@ -23,7 +23,7 @@ struct dpu_hw_sspp;
#define DPU_SSPP_ROT_90 BIT(3)
#define DPU_SSPP_SOLID_FILL BIT(4)
-/**
+/*
* Component indices
*/
enum {
@@ -36,9 +36,10 @@ enum {
};
/**
- * DPU_SSPP_RECT_SOLO - multirect disabled
- * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
- * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ * enum dpu_sspp_multirect_index - multirect mode
+ * @DPU_SSPP_RECT_SOLO: multirect disabled
+ * @DPU_SSPP_RECT_0: rect0 of a multirect pipe
+ * @DPU_SSPP_RECT_1: rect1 of a multirect pipe
*
* Note: HW supports multirect with either RECT0 or
* RECT1. Considering no benefit of such configs over
@@ -143,7 +144,7 @@ struct dpu_hw_pixel_ext {
* struct dpu_sw_pipe_cfg : software pipe configuration
* @src_rect: src ROI, caller takes into account the different operations
* such as decimation, flip etc to program this field
- * @dest_rect: destination ROI.
+ * @dst_rect: destination ROI.
* @rotation: simplified drm rotation hint
*/
struct dpu_sw_pipe_cfg {
@@ -165,8 +166,8 @@ struct dpu_hw_pipe_ts_cfg {
/**
* struct dpu_sw_pipe - software pipe description
* @sspp: backing SSPP pipe
- * @index: index of the rectangle of SSPP
- * @mode: parallel or time multiplex multirect mode
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
*/
struct dpu_sw_pipe {
struct dpu_hw_sspp *sspp;
@@ -181,7 +182,7 @@ struct dpu_sw_pipe {
*/
struct dpu_hw_sspp_ops {
/**
- * setup_format - setup pixel format cropping rectangle, flip
+ * @setup_format: setup pixel format cropping rectangle, flip
* @pipe: Pointer to software pipe context
* @cfg: Pointer to pipe config structure
* @flags: Extra flags for format config
@@ -190,7 +191,7 @@ struct dpu_hw_sspp_ops {
const struct msm_format *fmt, u32 flags);
/**
- * setup_rects - setup pipe ROI rectangles
+ * @setup_rects: setup pipe ROI rectangles
* @pipe: Pointer to software pipe context
* @cfg: Pointer to pipe config structure
*/
@@ -198,7 +199,7 @@ struct dpu_hw_sspp_ops {
struct dpu_sw_pipe_cfg *cfg);
/**
- * setup_pe - setup pipe pixel extension
+ * @setup_pe: setup pipe pixel extension
* @ctx: Pointer to pipe context
* @pe_ext: Pointer to pixel ext settings
*/
@@ -206,7 +207,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_pixel_ext *pe_ext);
/**
- * setup_sourceaddress - setup pipe source addresses
+ * @setup_sourceaddress: setup pipe source addresses
* @pipe: Pointer to software pipe context
* @layout: format layout information for programming buffer to hardware
*/
@@ -214,14 +215,14 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_fmt_layout *layout);
/**
- * setup_csc - setup color space coversion
+	 * @setup_csc: setup color space conversion
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
void (*setup_csc)(struct dpu_hw_sspp *ctx, const struct dpu_csc_cfg *data);
/**
- * setup_solidfill - enable/disable colorfill
+ * @setup_solidfill: enable/disable colorfill
* @pipe: Pointer to software pipe context
* @const_color: Fill color value
* @flags: Pipe flags
@@ -229,23 +230,22 @@ struct dpu_hw_sspp_ops {
void (*setup_solidfill)(struct dpu_sw_pipe *pipe, u32 color);
/**
- * setup_multirect - setup multirect configuration
+ * @setup_multirect: setup multirect configuration
* @pipe: Pointer to software pipe context
*/
void (*setup_multirect)(struct dpu_sw_pipe *pipe);
/**
- * setup_sharpening - setup sharpening
+ * @setup_sharpening: setup sharpening
* @ctx: Pointer to pipe context
* @cfg: Pointer to config structure
*/
void (*setup_sharpening)(struct dpu_hw_sspp *ctx,
struct dpu_hw_sharp_cfg *cfg);
-
/**
- * setup_qos_lut - setup QoS LUTs
+ * @setup_qos_lut: setup QoS LUTs
* @ctx: Pointer to pipe context
* @cfg: LUT configuration
*/
@@ -253,7 +253,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_qos_cfg *cfg);
/**
- * setup_qos_ctrl - setup QoS control
+ * @setup_qos_ctrl: setup QoS control
* @ctx: Pointer to pipe context
* @danger_safe_en: flags controlling enabling of danger/safe QoS/LUT
*/
@@ -261,7 +261,7 @@ struct dpu_hw_sspp_ops {
bool danger_safe_en);
/**
- * setup_clk_force_ctrl - setup clock force control
+ * @setup_clk_force_ctrl: setup clock force control
* @ctx: Pointer to pipe context
* @enable: enable clock force if true
*/
@@ -269,7 +269,7 @@ struct dpu_hw_sspp_ops {
bool enable);
/**
- * setup_histogram - setup histograms
+ * @setup_histogram: setup histograms
* @ctx: Pointer to pipe context
* @cfg: Pointer to histogram configuration
*/
@@ -277,7 +277,7 @@ struct dpu_hw_sspp_ops {
void *cfg);
/**
- * setup_scaler - setup scaler
+ * @setup_scaler: setup scaler
* @scaler3_cfg: Pointer to scaler configuration
* @format: pixel format parameters
*/
@@ -286,7 +286,7 @@ struct dpu_hw_sspp_ops {
const struct msm_format *format);
/**
- * setup_cdp - setup client driven prefetch
+ * @setup_cdp: setup client driven prefetch
* @pipe: Pointer to software pipe context
* @fmt: format used by the sw pipe
* @enable: whether the CDP should be enabled for this pipe
@@ -303,6 +303,7 @@ struct dpu_hw_sspp_ops {
* @ubwc: UBWC configuration data
* @idx: pipe index
* @cap: pointer to layer_cfg
+ * @mdss_ver: MDSS version info to use for feature checks
* @ops: pointer to operations possible for this pipe
*/
struct dpu_hw_sspp {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 04efdcd21ceb..80279d87c2cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -77,12 +77,11 @@ enum dpu_dp_phy_sel {
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
- * @setup_split_pipe : Programs the pipe control registers
- * @setup_pp_split : Programs the pp split control registers
- * @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
- /** setup_split_pipe() : Registers are not double buffered, thisk
+ /**
+ * @setup_split_pipe : Programs the pipe control registers.
+ * Registers are not double buffered, this
* function should be called before timing control enable
* @mdp : mdp top context driver
* @cfg : upper and lower part of pipe configuration
@@ -91,7 +90,7 @@ struct dpu_hw_mdp_ops {
struct split_pipe_cfg *p);
/**
- * setup_traffic_shaper() : Setup traffic shaper control
+ * @setup_traffic_shaper : programs traffic shaper control.
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
*/
@@ -99,7 +98,7 @@ struct dpu_hw_mdp_ops {
struct traffic_shaper_cfg *cfg);
/**
- * setup_clk_force_ctrl - set clock force control
+ * @setup_clk_force_ctrl: set clock force control
* @mdp: mdp top context driver
* @clk_ctrl: clock to be controlled
* @enable: force on enable
@@ -109,7 +108,7 @@ struct dpu_hw_mdp_ops {
enum dpu_clk_ctrl_type clk_ctrl, bool enable);
/**
- * get_danger_status - get danger status
+ * @get_danger_status: get danger status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
@@ -117,7 +116,7 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
- * setup_vsync_source - setup vsync source configuration details
+ * @setup_vsync_source: setup vsync source configuration details
* @mdp: mdp top context driver
* @cfg: vsync source selection configuration
*/
@@ -125,7 +124,7 @@ struct dpu_hw_mdp_ops {
struct dpu_vsync_source_cfg *cfg);
/**
- * get_safe_status - get safe status
+ * @get_safe_status: get safe status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
@@ -133,14 +132,14 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
- * dp_phy_intf_sel - configure intf to phy mapping
+ * @dp_phy_intf_sel: configure intf to phy mapping
* @mdp: mdp top context driver
* @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
*/
void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
/**
- * intf_audio_select - select the external interface for audio
+ * @intf_audio_select: select the external interface for audio
* @mdp: mdp top context driver
*/
void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index 285121ec804c..9ac49448e432 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -17,7 +17,7 @@ struct dpu_hw_vbif;
*/
struct dpu_hw_vbif_ops {
/**
- * set_limit_conf - set transaction limit config
+ * @set_limit_conf: set transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
@@ -27,7 +27,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool rd, u32 limit);
/**
- * get_limit_conf - get transaction limit config
+ * @get_limit_conf: get transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
@@ -37,7 +37,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool rd);
/**
- * set_halt_ctrl - set halt control
+ * @set_halt_ctrl: set halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @enable: halt control enable
@@ -46,7 +46,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool enable);
/**
- * get_halt_ctrl - get halt control
+ * @get_halt_ctrl: get halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @return: halt control enable
@@ -55,7 +55,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id);
/**
- * set_qos_remap - set QoS priority remap
+ * @set_qos_remap: set QoS priority remap
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @level: priority level
@@ -65,7 +65,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, u32 level, u32 remap_level);
/**
- * set_mem_type - set memory type
+ * @set_mem_type: set memory type
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @value: memory type value
@@ -74,7 +74,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, u32 value);
/**
- * clear_errors - clear any vbif errors
+ * @clear_errors: clear any vbif errors
* This function clears any detected pending/source errors
* on the VBIF interface, and optionally returns the detected
* error mask(s).
@@ -86,7 +86,7 @@ struct dpu_hw_vbif_ops {
u32 *pnd_errors, u32 *src_errors);
/**
- * set_write_gather_en - set write_gather enable
+ * @set_write_gather_en: set write_gather enable
* @vbif: vbif context driver
* @xin_id: client interface identifier
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index ee5e5ab786e1..cfdbb5bb2a0f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -22,11 +22,11 @@ struct dpu_hw_wb_cfg {
};
/**
- *
* struct dpu_hw_wb_ops : Interface to the wb hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_outaddress: setup output address from the writeback job
* @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_roi: setup ROI (Region of Interest) parameters
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
* @setup_clk_force_ctrl: setup clock force control
@@ -61,7 +61,7 @@ struct dpu_hw_wb_ops {
* struct dpu_hw_wb : WB driver object
* @hw: block hardware details
* @idx: hardware index number within type
- * @wb_hw_caps: hardware capabilities
+ * @caps: hardware capabilities
* @ops: function pointers
*/
struct dpu_hw_wb {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index d07a6ab6e7ee..9b7a8b46bfa9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -826,12 +826,8 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe_cfg *pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg;
- struct dpu_sw_pipe_cfg init_pipe_cfg;
struct drm_rect fb_rect = { 0 };
- const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
uint32_t max_linewidth;
- u32 num_lm;
- int stage_id, num_stages;
min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
max_scale = MAX_DOWNSCALE_RATIO << 16;
@@ -854,10 +850,13 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return -EINVAL;
}
- num_lm = dpu_crtc_get_num_lm(crtc_state);
-
+	/* move the assignment here, to ease handling of other pipe pairs later */
+ pipe_cfg = &pstate->pipe_cfg[0];
+ r_pipe_cfg = &pstate->pipe_cfg[1];
/* state->src is 16.16, src_rect is not */
- drm_rect_fp_to_int(&init_pipe_cfg.src_rect, &new_plane_state->src);
+ drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
+
+ pipe_cfg->dst_rect = new_plane_state->dst;
fb_rect.x2 = new_plane_state->fb->width;
fb_rect.y2 = new_plane_state->fb->height;
@@ -882,94 +881,35 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
max_linewidth = pdpu->catalog->caps->max_linewidth;
- drm_rect_rotate(&init_pipe_cfg.src_rect,
+ drm_rect_rotate(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- /*
- * We have 1 mixer pair cfg for 1:1:1 and 2:2:1 topology, 2 mixer pair
- * configs for left and right half screen in case of 4:4:2 topology.
- * But we may have 2 rect to split wide plane that exceeds limit with 1
- * config for 2:2:1. So need to handle both wide plane splitting, and
- * two halves of screen splitting for quad-pipe case. Check dest
- * rectangle left/right clipping first, then check wide rectangle
- * splitting in every half next.
- */
- num_stages = (num_lm + 1) / 2;
- /* iterate mixer configs for this plane, to separate left/right with the id */
- for (stage_id = 0; stage_id < num_stages; stage_id++) {
- struct drm_rect mixer_rect = {
- .x1 = stage_id * mode->hdisplay / num_stages,
- .y1 = 0,
- .x2 = (stage_id + 1) * mode->hdisplay / num_stages,
- .y2 = mode->vdisplay
- };
- int cfg_idx = stage_id * PIPES_PER_STAGE;
-
- pipe_cfg = &pstate->pipe_cfg[cfg_idx];
- r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1];
-
- drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
- pipe_cfg->dst_rect = new_plane_state->dst;
-
- DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT
- " vs clip window " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect),
- DRM_RECT_ARG(&mixer_rect));
-
- /*
- * If this plane does not fall into mixer rect, check next
- * mixer rect.
- */
- if (!drm_rect_clip_scaled(&pipe_cfg->src_rect,
- &pipe_cfg->dst_rect,
- &mixer_rect)) {
- memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg));
-
- continue;
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
+ if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
}
- pipe_cfg->dst_rect.x1 -= mixer_rect.x1;
- pipe_cfg->dst_rect.x2 -= mixer_rect.x1;
-
- DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect));
-
- /* Split wide rect into 2 rect */
- if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
- _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) {
-
- if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
-
- memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg));
- pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
- pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
- r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
- r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
- DPU_DEBUG_PLANE(pdpu, "Split wide plane into:"
- DRM_RECT_FMT " and " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect),
- DRM_RECT_ARG(&r_pipe_cfg->src_rect));
- } else {
- memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg));
- }
+ *r_pipe_cfg = *pipe_cfg;
+ pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
+ pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
+ r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ } else {
+ memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
+ }
- drm_rect_rotate_inv(&pipe_cfg->src_rect,
- new_plane_state->fb->width,
- new_plane_state->fb->height,
+ drm_rect_rotate_inv(&pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
+ new_plane_state->rotation);
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
+ drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
- drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
- new_plane_state->fb->width,
- new_plane_state->fb->height,
- new_plane_state->rotation);
- }
-
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
@@ -1045,17 +985,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
drm_atomic_get_new_plane_state(state, plane);
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe *pipe;
- struct dpu_sw_pipe_cfg *pipe_cfg;
- int ret = 0, i;
+ struct dpu_sw_pipe *pipe = &pstate->pipe[0];
+ struct dpu_sw_pipe *r_pipe = &pstate->pipe[1];
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0];
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1];
+ int ret = 0;
- for (i = 0; i < PIPES_PER_PLANE; i++) {
- pipe = &pstate->pipe[i];
- pipe_cfg = &pstate->pipe_cfg[i];
- if (!drm_rect_width(&pipe_cfg->src_rect))
- continue;
- DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i);
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
+ if (ret)
+ return ret;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
&crtc_state->adjusted_mode,
new_plane_state);
if (ret)
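
The split logic above halves a too-wide plane at its midpoint, giving each of the
two pipe configs one half of both the source and destination rectangles. A minimal
standalone sketch of that arithmetic (hypothetical struct, not the driver's types):

	struct rect { int x1, x2; };

	/* Halve [x1, x2) at the midpoint: left keeps the first half,
	 * right gets a copy that starts where left now ends. */
	static void split_wide_rect(struct rect *left, struct rect *right)
	{
		*right = *left;
		left->x2 = (left->x1 + left->x2) >> 1;
		right->x1 = left->x2;
	}
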
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.h b/drivers/gpu/drm/msm/disp/mdp_format.h
index a00d646ff4d4..915954bf5dc7 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.h
+++ b/drivers/gpu/drm/msm/disp/mdp_format.h
@@ -24,7 +24,7 @@ enum msm_format_flags {
#define MSM_FORMAT_FLAG_UNPACK_TIGHT BIT(MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT)
#define MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB BIT(MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT)
-/**
+/*
 * DPU HW, component order color map
*/
enum {
@@ -37,6 +37,10 @@ enum {
/**
* struct msm_format: defines the format configuration
* @pixel_format: format fourcc
+ * @bpc_g_y: element bit widths: BPC for G or Y
+ * @bpc_b_cb: element bit widths: BPC for B or Cb
+ * @bpc_r_cr: element bit widths: BPC for R or Cr
+ * @bpc_a: element bit widths: BPC for the alpha channel
* @element: element color ordering
* @fetch_type: how the color components are packed in pixel format
 * @chroma_sample: chroma sub-sampling type
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 6dc0ff4f0f65..a90083fec856 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -12,7 +12,7 @@
#if defined(CONFIG_DEBUG_FS)
/**
- * msm_dp_debug_get() - configure and get the DisplayPlot debug module data
+ * msm_dp_debug_init() - configure and get the DisplayPort debug module data
*
* @dev: device instance of the caller
* @panel: instance of panel module
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9a461ab2f32f..fd6443d2b6ce 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -18,6 +18,7 @@
/**
* msm_dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
+ * @connector: Pointer to drm connector structure
* Returns: Bridge's 'is connected' status
*/
static enum drm_connector_status
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index b1eb2de6d2a7..8460e4ad2d35 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -80,11 +80,11 @@ struct msm_dp_link {
};
/**
- * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp
+ * msm_dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
* @tbd: test bit depth
*
- * Returns the bits per pixel (bpp) to be used corresponding to the
- * git bit depth value. This function assumes that bit depth has
+ * Returns: the bits per pixel (bpp) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
* already been validated.
*/
static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd)
@@ -120,7 +120,8 @@ bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum
/**
* msm_dp_link_get() - get the functionalities of dp test module
- *
+ * @dev: kernel device structure
+ * @aux: DisplayPort AUX channel
*
* return: a pointer to msm_dp_link struct
*/
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 921a296852d4..177c1328fd99 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -63,9 +63,9 @@ void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel);
/**
* is_link_rate_valid() - validates the link rate
- * @lane_rate: link rate requested by the sink
+ * @bw_code: link rate requested by the sink
*
- * Returns true if the requested link rate is supported.
+ * Returns: true if the requested link rate is supported.
*/
static inline bool is_link_rate_valid(u32 bw_code)
{
@@ -76,10 +76,10 @@ static inline bool is_link_rate_valid(u32 bw_code)
}
/**
- * msm_dp_link_is_lane_count_valid() - validates the lane count
+ * is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
- * Returns true if the requested lane count is supported.
+ * Returns: true if the requested lane count is supported.
*/
static inline bool is_lane_count_valid(u32 lane_count)
{
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 148196375a0b..3317a485beef 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -16,34 +16,29 @@
* incrementing fence seqno at the end of each submit
*/
struct msm_fence_context {
+ /** @dev: the drm device */
struct drm_device *dev;
- /** name: human readable name for fence timeline */
+ /** @name: human readable name for fence timeline */
char name[32];
- /** context: see dma_fence_context_alloc() */
+ /** @context: see dma_fence_context_alloc() */
unsigned context;
- /** index: similar to context, but local to msm_fence_context's */
+ /** @index: similar to context, but local to msm_fence_context's */
unsigned index;
-
/**
- * last_fence:
- *
+ * @last_fence:
* Last assigned fence, incremented each time a fence is created
* on this fence context. If last_fence == completed_fence,
* there is no remaining pending work
*/
uint32_t last_fence;
-
/**
- * completed_fence:
- *
+ * @completed_fence:
* The last completed fence, updated from the CPU after interrupt
* from GPU
*/
uint32_t completed_fence;
-
/**
- * fenceptr:
- *
+ * @fenceptr:
* The address that the GPU directly writes with completed fence
* seqno. This can be ahead of completed_fence. We can peek at
* this to see if a fence has already signaled but the CPU hasn't
@@ -51,6 +46,9 @@ struct msm_fence_context {
*/
volatile uint32_t *fenceptr;
+ /**
+ * @spinlock: fence context spinlock
+ */
spinlock_t spinlock;
/*
@@ -59,18 +57,22 @@ struct msm_fence_context {
* don't queue, so maybe that is ok
*/
- /** next_deadline: Time of next deadline */
+ /** @next_deadline: Time of next deadline */
ktime_t next_deadline;
-
/**
- * next_deadline_fence:
- *
+ * @next_deadline_fence:
* Fence value for next pending deadline. The deadline timer is
* canceled when this fence is signaled.
*/
uint32_t next_deadline_fence;
-
+ /**
+ * @deadline_timer: tracks nearest deadline of a fence timeline and
+ * expires just before it.
+ */
struct hrtimer deadline_timer;
+ /**
+ * @deadline_work: work to do after deadline_timer expires
+ */
struct kthread_work deadline_work;
};
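
A hedged sketch of how these fields combine: the GPU-written @fenceptr may run
ahead of @completed_fence, so a completion check can peek at it before falling
back. This is not the driver's actual helper; the signed cast keeps the
comparison safe across seqno wraparound:

	static inline bool fctx_completed(struct msm_fence_context *fctx,
					  uint32_t fence)
	{
		/* Peek at the GPU-written seqno first, if available */
		if (fctx->fenceptr && (int32_t)(*fctx->fenceptr - fence) >= 0)
			return true;
		return (int32_t)(fctx->completed_fence - fence) >= 0;
	}
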
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 71d5238437eb..8f7c90167447 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -65,7 +65,7 @@ struct msm_vm_unmap_op {
};
/**
- * struct msm_vma_op - A MAP or UNMAP operation
+ * struct msm_vm_op - A MAP or UNMAP operation
*/
struct msm_vm_op {
/** @op: The operation type */
@@ -798,6 +798,9 @@ static const struct drm_sched_backend_ops msm_vm_bind_ops = {
* synchronous operations are supported. In a user managed VM, userspace
* handles virtual address allocation, and both async and sync operations
* are supported.
+ *
+ * Returns: pointer to the created &struct drm_gpuvm on success
+ * or an ERR_PTR(-errno) on failure.
*/
struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2894fc118485..666cf499b7ec 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -116,15 +116,12 @@ struct msm_gpu_fault_info {
* struct msm_gpu_devfreq - devfreq related state
*/
struct msm_gpu_devfreq {
- /** devfreq: devfreq instance */
+ /** @devfreq: devfreq instance */
struct devfreq *devfreq;
-
- /** lock: lock for "suspended", "busy_cycles", and "time" */
+ /** @lock: lock for "suspended", "busy_cycles", and "time" */
struct mutex lock;
-
/**
- * idle_freq:
- *
+ * @idle_freq:
* Shadow frequency used while the GPU is idle. From the PoV of
* the devfreq governor, we are continuing to sample busyness and
* adjust frequency while the GPU is idle, but we use this shadow
@@ -132,43 +129,34 @@ struct msm_gpu_devfreq {
* it is inactive.
*/
unsigned long idle_freq;
-
/**
- * boost_constraint:
- *
+ * @boost_freq:
* A PM QoS constraint to boost min freq for a period of time
* until the boost expires.
*/
struct dev_pm_qos_request boost_freq;
-
/**
- * busy_cycles: Last busy counter value, for calculating elapsed busy
+ * @busy_cycles: Last busy counter value, for calculating elapsed busy
* cycles since last sampling period.
*/
u64 busy_cycles;
-
- /** time: Time of last sampling period. */
+ /** @time: Time of last sampling period. */
ktime_t time;
-
- /** idle_time: Time of last transition to idle: */
+ /** @idle_time: Time of last transition to idle. */
ktime_t idle_time;
-
/**
- * idle_work:
- *
+ * @idle_work:
* Used to delay clamping to idle freq on active->idle transition.
*/
struct msm_hrtimer_work idle_work;
-
/**
- * boost_work:
- *
+ * @boost_work:
* Used to reset the boost_constraint after the boost period has
* elapsed
*/
struct msm_hrtimer_work boost_work;
- /** suspended: tracks if we're suspended */
+ /** @suspended: tracks if we're suspended */
bool suspended;
};
@@ -358,57 +346,43 @@ struct msm_gpu_perfcntr {
struct msm_context {
/** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
-
/** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
-
/**
* @queueid:
- *
* Counter incremented each time a submitqueue is created, used to
* assign &msm_gpu_submitqueue.id
*/
int queueid;
-
/**
* @closed: The device file associated with this context has been closed.
- *
* Once the device is closed, any submits that have not been written
* to the ring buffer are no-op'd.
*/
bool closed;
-
/**
* @userspace_managed_vm:
- *
* Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
* MSM_PARAM_EN_VM_BIND?
*/
bool userspace_managed_vm;
-
/**
* @vm:
- *
* The per-process GPU address-space. Do not access directly, use
* msm_context_vm().
*/
struct drm_gpuvm *vm;
-
- /** @kref: the reference count */
+ /** @ref: the reference count */
struct kref ref;
-
/**
* @seqno:
- *
* A unique per-process sequence number. Used to detect context
* switches, without relying on keeping a, potentially dangling,
* pointer to the previous context.
*/
int seqno;
-
/**
* @sysprof:
- *
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
* pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
@@ -423,40 +397,32 @@ struct msm_context {
* file is closed.
*/
int sysprof;
-
/**
* @comm: Overridden task comm, see MSM_PARAM_COMM
*
* Accessed under msm_gpu::lock
*/
char *comm;
-
/**
* @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
*
* Accessed under msm_gpu::lock
*/
char *cmdline;
-
/**
- * @elapsed:
- *
+ * @elapsed_ns:
* The total (cumulative) elapsed time GPU was busy with rendering
* from this context in ns.
*/
uint64_t elapsed_ns;
-
/**
* @cycles:
- *
* The total (cumulative) GPU cycles elapsed attributed to this
* context.
*/
uint64_t cycles;
-
/**
* @entities:
- *
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
* make assumptions about rendering from multiple gl contexts
@@ -466,10 +432,8 @@ struct msm_context {
* level.
*/
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
-
/**
* @ctx_mem:
- *
* Total amount of memory of GEM buffers with handles attached for
* this context.
*/
@@ -479,7 +443,7 @@ struct msm_context {
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
/**
- * msm_context_is_vm_bind() - has userspace opted in to VM_BIND?
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
*
* @ctx: the drm_file context
*
@@ -487,6 +451,8 @@ struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx
* do sparse binding including having multiple, potentially partial,
* mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
* SET_IOVA) are rejected because they don't have a sensible meaning.
+ *
+ * Returns: %true if userspace is managing the VM, %false otherwise.
*/
static inline bool
msm_context_is_vmbind(struct msm_context *ctx)
@@ -518,6 +484,8 @@ msm_context_is_vmbind(struct msm_context *ctx)
* This allows generations without preemption (nr_rings==1) to have some
* amount of prioritization, and provides more priority levels for gens
* that do have preemption.
+ *
+ * Returns: %0 on success, %-errno on error.
*/
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
unsigned *ring_nr, enum drm_sched_priority *sched_prio)
@@ -541,7 +509,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
}
/**
- * struct msm_gpu_submitqueues - Userspace created context.
+ * struct msm_gpu_submitqueue - Userspace created context.
*
* A submitqueue is associated with a gl context or vk queue (or equiv)
* in userspace.
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a188617653e8..d5dede4ff761 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -364,7 +364,7 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
}
/**
- * alloc_pt() - Custom page table allocator
+ * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
* @cookie: Cookie passed at page table allocation time.
* @size: Size of the page table. This size should be fixed,
* and determined at creation time based on the granule size.
@@ -416,7 +416,7 @@ msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
/**
- * free_pt() - Custom page table free function
+ * msm_iommu_pagetable_free_pt() - Custom page table free function
* @cookie: Cookie passed at page table allocation time.
* @data: Page table to free.
* @size: Size of the page table. This size should be fixed,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index d3c7889aaf26..c369d4acc378 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -65,13 +65,13 @@ static int refill_buf(struct msm_perf_state *perf)
if ((perf->cnt++ % 32) == 0) {
/* Header line: */
- n = snprintf(ptr, rem, "%%BUSY");
+ n = scnprintf(ptr, rem, "%%BUSY");
ptr += n;
rem -= n;
for (i = 0; i < gpu->num_perfcntrs; i++) {
const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
- n = snprintf(ptr, rem, "\t%s", perfcntr->name);
+ n = scnprintf(ptr, rem, "\t%s", perfcntr->name);
ptr += n;
rem -= n;
}
@@ -93,21 +93,21 @@ static int refill_buf(struct msm_perf_state *perf)
return ret;
val = totaltime ? 1000 * activetime / totaltime : 0;
- n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
+ n = scnprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
ptr += n;
rem -= n;
for (i = 0; i < ret; i++) {
/* cycle counters (I think).. convert to MHz.. */
val = cntrs[i] / 10000;
- n = snprintf(ptr, rem, "\t%5d.%02d",
+ n = scnprintf(ptr, rem, "\t%5d.%02d",
val / 100, val % 100);
ptr += n;
rem -= n;
}
}
- n = snprintf(ptr, rem, "\n");
+ n = scnprintf(ptr, rem, "\n");
ptr += n;
rem -= n;
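
The snprintf()-to-scnprintf() swap matters because the two differ on truncation:
snprintf() returns the length the output *would* have had, while scnprintf()
returns the bytes actually written, so the running `rem` bookkeeping above can
no longer go negative. A toy illustration (assumed 4-byte buffer):

	char buf[4];
	int rem = sizeof(buf);

	rem -= snprintf(buf, rem, "hello");	/* returns 5, rem underflows to -1 */

	rem = sizeof(buf);
	rem -= scnprintf(buf, rem, "hello");	/* returns 3 ("hel"), rem stays 1 */
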
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 93f8f4f64578..b43c4f9bbcdf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(statec))
return (void *)statec;
+
+ return nv50_head_atom(statec);
+}
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!statec)
+ return NULL;
+
return nv50_head_atom(statec);
}
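
The two helpers fail differently, and callers must mirror that:
nv50_head_atom_get() may acquire the CRTC state and so returns an ERR_PTR,
while the new nv50_head_atom_get_new() only peeks at state already in the
atomic commit and returns NULL when the CRTC is not part of it. A hedged
caller-side sketch:

	asyh = nv50_head_atom_get(state, crtc);		/* may lock: ERR_PTR on failure */
	if (IS_ERR(asyh))
		return PTR_ERR(asyh);

	asyh = nv50_head_atom_get_new(state, crtc);	/* peek only: NULL if absent */
	if (!asyh)
		return 0;
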
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index a95ee5dcc2e3..1a889139cb05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -84,6 +84,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
+ nv50_atom(asyh->state.state)->lock_core = true;
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 3dd742b4f823..e32ed1db6c56 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -43,6 +43,9 @@ nv50_head_flush_clr(struct nv50_head *head,
union nv50_head_atom_mask clr = {
.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
};
+
+ lockdep_assert_held(&head->disp->mutex);
+
if (clr.crc) nv50_crc_atomic_clr(head);
if (clr.olut) head->func->olut_clr(head);
if (clr.core) head->func->core_clr(head);
@@ -65,6 +68,8 @@ nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
+ lockdep_assert_held(&head->disp->mutex);
+
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
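
The added lockdep_assert_held() calls document and enforce the locking contract
in one line: with lockdep enabled, entering these flush paths without
disp->mutex held now splats immediately instead of racing silently. The general
pattern, sketched with a hypothetical helper:

	static void touch_head_state(struct nv50_disp *disp)	/* hypothetical */
	{
		lockdep_assert_held(&disp->mutex);	/* no-op unless lockdep is on */
		/* ... mutate state that disp->mutex guards ... */
	}
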
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ef9e410babbf..9a2c20fce0f3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
- asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index 35d1fcef520b..c456a9626823 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -30,6 +30,9 @@ ad102_gsp = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 503760246660..851140e80122 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -337,18 +337,12 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
-nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
}
-void
-nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
-{
- nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
-}
-
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index d201e8697226..27a13aeccd3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -47,6 +47,9 @@ ga100_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 917f7e2f6c46..b6b3eb6f4c00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -158,6 +158,9 @@ ga102_gsp_r535 = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 86bdd203bc10..9dd66a2e3801 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -7,9 +7,8 @@ enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
-int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
-void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp);
struct nvkm_gsp_fwif {
int version;
@@ -52,6 +51,11 @@ struct nvkm_gsp_func {
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
+ struct {
+ int (*ctor)(struct nvkm_gsp *);
+ void (*dtor)(struct nvkm_gsp *);
+ } fwsec_sb;
+
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
@@ -67,6 +71,8 @@ extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
+void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_init(struct nvkm_gsp *);
int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
@@ -91,5 +97,18 @@ int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
+static inline int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.ctor)
+ return gsp->func->fwsec_sb.ctor(gsp);
+ return 0;
+}
+
+static inline void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.dtor)
+ gsp->func->fwsec_sb.dtor(gsp);
+}
+
extern const struct nvkm_gsp_func gv100_gsp;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 81e56da0474a..04b642a1f730 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -30,6 +30,18 @@
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+int
+tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ return nvkm_gsp_fwsec_sb_init(gsp);
+}
+
+void
+tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
+}
+
static int
tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -370,6 +382,9 @@ tu102_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 97eb046c25d0..58cf25842421 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -30,6 +30,9 @@ tu116_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 76f6af819037..7a83804fedca 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -1165,6 +1165,7 @@ config DRM_PANEL_VISIONOX_RM69299
tristate "Visionox RM69299"
depends on OF
depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b26b682826bc..162cc58c7b8f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -623,49 +623,6 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
if (IS_ERR(desc))
return ERR_CAST(desc);
- panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
- &panel_simple_funcs, desc->connector_type);
- if (IS_ERR(panel))
- return ERR_CAST(panel);
-
- panel->desc = desc;
-
- panel->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(panel->supply))
- return ERR_CAST(panel->supply);
-
- panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio))
- return dev_err_cast_probe(dev, panel->enable_gpio,
- "failed to request GPIO\n");
-
- err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
- if (err) {
- dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
- return ERR_PTR(err);
- }
-
- ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
- if (ddc) {
- panel->ddc = of_find_i2c_adapter_by_node(ddc);
- of_node_put(ddc);
-
- if (!panel->ddc)
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
- !of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
-
- if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* Optional data-mapping property for overriding bus format */
- err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
- if (err)
- goto free_ddc;
- }
-
connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
@@ -690,8 +647,7 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
- err = -EINVAL;
- goto free_ddc;
+ return ERR_PTR(-EINVAL);
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -720,6 +676,49 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
}
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, connector_type);
+ if (IS_ERR(panel))
+ return ERR_CAST(panel);
+
+ panel->desc = desc;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return ERR_CAST(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_cast_probe(dev, panel->enable_gpio,
+ "failed to request GPIO\n");
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return ERR_PTR(err);
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
+ !of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+
+ if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* Optional data-mapping property for overriding bus format */
+ err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
+ if (err)
+ goto free_ddc;
+ }
+
dev_set_drvdata(dev, panel);
/*
@@ -1900,6 +1899,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 7c989b70ab51..a14c86c60d19 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -212,6 +212,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ ctx->panel.prepare_prev_first = true;
+
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index d4839d282689..f6339963e496 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1252,17 +1252,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
- /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
- * pre-allocated BO if the <BO,VM> association exists. Given we
- * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
- * be called immediately, and we have to hold the VM resv lock when
- * calling this function.
- */
- dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->base.base.gpuva.lock);
- dma_resv_unlock(panthor_vm_resv(vm));
op_ctx->map.bo_offset = offset;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 56ff6a3fb483..d7dc83cf7b00 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -295,7 +295,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
- return ret;
+ goto dev_put;
}
ret = pl111_modeset_init(drm);
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 969a8fb0ee9e..f4e71046dc91 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[] __counted_by(ucNumEntries);
+ UCHAR clockInfo[] /*__counted_by(ucNumEntries)*/;
}ClockInfoArray;
typedef struct _NonClockInfoArray{
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index c9fe6aa3e3e3..8604342f9943 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -121,7 +121,7 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
struct drm_crtc *crtc = encoder->crtc;
/* Unconditionally switch to TMDS as FRL is not yet supported */
- gpiod_set_value(hdmi->frl_enable_gpio, 0);
+ gpiod_set_value_cansleep(hdmi->frl_enable_gpio, 0);
if (!crtc || !crtc->state)
return;
@@ -640,6 +640,15 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
}
+static int __maybe_unused dw_hdmi_qp_rockchip_suspend(struct device *dev)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_qp_suspend(dev, hdmi->hdmi);
+
+ return 0;
+}
+
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
@@ -655,7 +664,8 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
}
static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_qp_rockchip_suspend,
+ dw_hdmi_qp_rockchip_resume)
};
struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index cd8380f0eddc..f3950e8476a7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2104,7 +2104,7 @@ static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
	 * Spin until the previous port_mux configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
- port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ port_mux_sel == vop2->old_port_sel, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
port_mux_sel, vop2->old_port_sel);
@@ -2124,7 +2124,7 @@ static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
* Spin until the previous layer configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
- atv_layer_cfg == cfg, 0, 50 * 1000);
+ atv_layer_cfg == cfg, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
atv_layer_cfg, cfg);
@@ -2144,6 +2144,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
u8 layer_sel_id;
unsigned int ofs;
u32 ovl_ctrl;
+ u32 cfg_done;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
@@ -2298,8 +2299,16 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
rk3568_vop2_wait_for_port_mux_done(vop2);
}
- if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
- rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) {
+ cfg_done = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+ cfg_done &= (BIT(vop2->data->nr_vps) - 1);
+ cfg_done &= ~BIT(vp->id);
+		/*
+		 * Only wait if other VPs' overlay changes have not yet
+		 * taken effect.
+		 */
+ if (cfg_done)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ }
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
mutex_unlock(&vop2->ovl_lock);
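
Both timeout tweaks above change only the polling cadence of
readx_poll_timeout_atomic() (from <linux/iopoll.h>): the fifth argument is the
delay between reads in microseconds, so 0 meant a tight busy-spin and 10
inserts a short pause, while the 50 ms total timeout stays unchanged. Shape of
a call, with hypothetical names:

	u32 val;
	int ret;

	/* poll read_hw_state(vop2) every 10us, for at most 50ms total */
	ret = readx_poll_timeout_atomic(read_hw_state, vop2, val,
					val == expected, 10, 50 * 1000);
	if (ret)
		pr_warn("timed out waiting for hw state\n");
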
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index da670d7eeb2e..de96bfe7562c 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -55,15 +55,6 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
#endif
/*
- * Input parsing
- */
-
-int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-
-/*
* Display modes
*/
diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
index 2f6ac7a09f44..bc27f65b2823 100644
--- a/drivers/gpu/drm/tests/drm_atomic_state_test.c
+++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
@@ -156,24 +156,29 @@ static int set_up_atomic_state(struct kunit *test,
if (connector) {
conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
}
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &drm_atomic_test_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->enable = true;
crtc_state->active = true;
if (connector) {
ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
} else {
// dummy connector mask
crtc_state->connector_mask = DRM_TEST_CONN_0;
@@ -206,7 +211,13 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
// first modeset to enable
+retry_set_up:
ret = set_up_atomic_state(test, priv, old_conn, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_set_up;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -277,13 +288,26 @@ static void drm_test_check_valid_clones(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_set_up:
ret = set_up_atomic_state(test, priv, NULL, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_set_up;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+retry:
crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
crtc_state->encoder_mask = param->encoder_mask;
@@ -292,6 +316,12 @@ static void drm_test_check_valid_clones(struct kunit *test)
crtc_state->mode_changed = true;
ret = drm_atomic_helper_check_modeset(drm, state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, param->expected_result);
drm_modeset_drop_locks(&ctx);
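
All of the -EDEADLK handling added in this and the following test file is the
standard drm_modeset_lock dance: when lock acquisition deadlocks,
drm_modeset_backoff() drops the held locks and blocks on the contended one,
after which the whole locked sequence is retried. Distilled into one hedged
helper (do_locked_work() is hypothetical):

	static int with_modeset_locks(struct drm_device *drm)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = do_locked_work(drm, &ctx);	/* hypothetical */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}
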
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 8bd412735000..70f9aa702143 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -257,10 +257,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -326,10 +332,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -397,10 +409,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -457,10 +475,17 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -518,10 +543,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -580,10 +611,17 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -643,10 +681,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -705,10 +749,17 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -870,10 +921,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -946,10 +1003,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -1022,10 +1085,16 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1069,10 +1138,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1118,10 +1193,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1167,10 +1248,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1218,10 +1305,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1292,10 +1385,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1440,10 +1539,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1669,10 +1774,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1736,10 +1848,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1805,10 +1923,16 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1865,10 +1989,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1927,10 +2057,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1970,10 +2106,17 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 86eb5d97410b..8bb93194e5ac 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -26,9 +26,33 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
- drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+ /*
+ * TI's OLDI and DSI encoders need to be set up before the crtc is
+ * enabled. Thus drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() cannot be used here, as
+ * they enable the crtc before bridges' pre-enable, and disable the crtc
+ * after bridges' post-disable.
+ *
+ * Open code the functions here and first call the bridges' pre-enables,
+ * then crtc enable, then bridges' post-enable (and vice versa for
+ * disable).
+ */
+
+ drm_atomic_helper_commit_encoder_bridge_disable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_disable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(ddev, old_state);
+
+ drm_atomic_helper_update_legacy_modeset_state(ddev, old_state);
+ drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_commit_crtc_set_mode(ddev, old_state);
+
+ drm_atomic_helper_commit_planes(ddev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_enable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_enable(ddev, old_state);
+ drm_atomic_helper_commit_writebacks(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f031a312c783..b22887e8c881 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -32,9 +32,15 @@
#include <drm/ttm/ttm_placement.h>
-static void vmw_bo_release(struct vmw_bo *vbo)
+/**
+ * vmw_bo_free - vmw_bo destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_resource *res;
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
@@ -62,20 +68,8 @@ static void vmw_bo_release(struct vmw_bo *vbo)
}
vmw_surface_unreference(&vbo->dumb_surface);
}
- drm_gem_object_release(&vbo->tbo.base);
-}
-
-/**
- * vmw_bo_free - vmw_bo destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_bo *vbo = to_vmw_bo(&bo->base);
-
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_release(vbo);
+ drm_gem_object_release(&vbo->tbo.base);
WARN_ON(vbo->dirty);
kfree(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 00be92da5509..85795082fef9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,12 +515,12 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
/**
* vmw_event_fence_action_seq_passed
*
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
+ * @f: The struct dma_fence which provides timestamp for the action event
+ * @cb: The struct dma_fence_cb callback for the action event.
*
- * This function is called when the seqno of the fence where @action is
- * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context.
+ * This function is called when the seqno of the fence has passed. It is
+ * always called from atomic context and queues the event on the
+ * submitter's event list.
*/
static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
struct dma_fence_cb *cb)
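
A minimal sketch of the registration side this callback implies, using the generic dma_fence API (the struct layout is illustrative, not vmwgfx's actual one):

	struct my_event {
		struct dma_fence_cb cb;	/* must stay alive until the callback runs */
		struct list_head link;	/* entry in the submitter's event list */
	};

	/* Invoked from the fence signalling path, i.e. atomic context:
	 * no sleeping, just queue the event and return.
	 */
	static void my_seq_passed(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		struct my_event *e = container_of(cb, struct my_event, cb);

		/* add e->link to the submitter's event list under its spinlock */
	}

	/* Returns -ENOENT if the fence has already signalled, in which case
	 * the callback is never invoked and the caller must act directly.
	 */
	err = dma_fence_add_callback(fence, &e->cb, my_seq_passed);
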
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d32ce1cb579e..bc51b5d55e38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -766,13 +766,15 @@ err_out:
return ERR_PTR(ret);
}
- ttm_bo_reserve(&bo->tbo, false, false, NULL);
- ret = vmw_bo_dirty_add(bo);
- if (!ret && surface && surface->res.func->dirty_alloc) {
- surface->res.coherent = true;
- ret = surface->res.func->dirty_alloc(&surface->res);
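+ /* @bo may be NULL for framebuffers with no buffer-object backing. */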
+ if (bo) {
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
}
- ttm_bo_unreserve(&bo->tbo);
return &vfb->base;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 69dfe69ce0f8..a8c8c9375d29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -923,8 +923,10 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
- if (unlikely(ret != 0))
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
goto no_reserve;
+ }
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
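
The bug here was testing the stale integer ret instead of the pointer returned by vmw_shader_alloc(); pointer-returning kernel allocators encode failure in the pointer itself, per the usual idiom:

	ptr = some_alloc();		/* returns ERR_PTR(-errno) on failure */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* recover the negative errno */
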
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index b0bd31d14bb9..bf4ee976b680 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1527,7 +1527,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
* always succeed here, as long as we hold the lru lock.
*/
spin_lock(&ttm_bo->bdev->lru_lock);
- locked = dma_resv_trylock(ttm_bo->base.resv);
+ locked = dma_resv_trylock(&ttm_bo->base._resv);
spin_unlock(&ttm_bo->bdev->lru_lock);
xe_assert(xe, locked);
@@ -1547,13 +1547,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
bo = ttm_to_xe_bo(ttm_bo);
xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
- /*
- * Corner case where TTM fails to allocate memory and this BOs resv
- * still points the VMs resv
- */
- if (ttm_bo->base.resv != &ttm_bo->base._resv)
- return;
-
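+ /* The embedded _resv is always initialized, so the code below is safe
+ * even when TTM could not individualize bo->base.resv.
+ */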
if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
return;
@@ -1563,14 +1556,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
* TODO: Don't do this for external bos once we scrub them after
* unbind.
*/
- dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+ dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP, fence) {
if (xe_fence_is_xe_preempt(fence) &&
!dma_fence_is_signaled(fence)) {
if (!replacement)
replacement = dma_fence_get_stub();
- dma_resv_replace_fences(ttm_bo->base.resv,
+ dma_resv_replace_fences(&ttm_bo->base._resv,
fence->context,
replacement,
DMA_RESV_USAGE_BOOKKEEP);
@@ -1578,7 +1571,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
}
dma_fence_put(replacement);
- dma_resv_unlock(ttm_bo->base.resv);
+ dma_resv_unlock(&ttm_bo->base._resv);
}
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index c7d373c70f0f..cf29e259861f 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -1056,7 +1056,7 @@ static void tdf_request_sync(struct xe_device *xe)
* transient and need to be flushed.
*/
if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
- 150, NULL, false))
+ 300, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 54e42960daad..7c74a31d4486 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -124,7 +124,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
case XE_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->ttm.ttm->pages,
- bo->ttm.ttm->num_pages);
+ obj->size >> PAGE_SHIFT);
if (IS_ERR(sgt))
return sgt;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 97dfb7945b7a..a5c36a317a70 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -315,7 +315,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
return -EFAULT;
if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, !ext.property) || XE_IOCTL_DBG(xe, ext.pad))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 4d81210e41f5..fd9480031750 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -132,7 +132,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
- XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
+ XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
return -EINVAL;
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index dbb5e7a9bc6a..cdce210e36f2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -797,9 +797,6 @@ static int do_gt_restart(struct xe_gt *gt)
xe_gt_sriov_pf_init_hw(gt);
xe_mocs_init(gt);
- err = xe_uc_start(&gt->uc);
- if (err)
- return err;
for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
@@ -807,6 +804,10 @@ static int do_gt_restart(struct xe_gt *gt)
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
+ err = xe_uc_start(&gt->uc);
+ if (err)
+ return err;
+
/* Restore GT freq to expected values */
xe_gt_sanitize_freq(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 849ea6c86e8e..ce3c7810469f 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -293,8 +293,10 @@ int xe_gt_freq_init(struct xe_gt *gt)
return -ENOMEM;
err = sysfs_create_files(gt->freq, freq_attrs);
- if (err)
+ if (err) {
+ kobject_put(gt->freq);
return err;
+ }
err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
if (err)
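
The leak being fixed follows from kobject lifetime rules; in sketch form (names illustrative):

	kobj = kobject_create_and_add("freq", parent);	/* takes a reference */
	if (!kobj)
		return -ENOMEM;

	err = sysfs_create_files(kobj, attrs);
	if (err) {
		kobject_put(kobj);	/* drop the creation reference, else it leaks */
		return err;
	}
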
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index bdc9d9877ec4..3e3d1d52f630 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -5,6 +5,7 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
#include "xe_force_wake.h"
#include "xe_device.h"
#include "xe_gt.h"
@@ -16,6 +17,7 @@
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_wa.h"
/**
* DOC: Xe GT Idle
@@ -145,6 +147,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
}
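+ /* Wa_14020316580: keep VD0/VD2 HCP and MFX/VDENC power gating disabled. */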
+ if (XE_GT_WA(gt, 14020316580))
+ gtidle->powergate_enable &= ~(VDN_HCP_POWERGATE_ENABLE(0) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(0) |
+ VDN_HCP_POWERGATE_ENABLE(2) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(2));
+
xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4c73a077d314..033eae2d03d3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -733,7 +733,7 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
spin_lock(&gt->sriov.vf.migration.lock);
- if (!gt->sriov.vf.migration.recovery_queued ||
+ if (!gt->sriov.vf.migration.recovery_queued &&
!gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 82c5fbcdfbe3..01477fc7b37b 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -140,7 +140,7 @@ static ssize_t reasons_show(struct kobject *kobj,
struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr);
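+ /* Emit each matching reason without its "reason_" sysfs name prefix. */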
if (other_ta->mask != U32_MAX && reasons & other_ta->mask)
- ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name);
+ ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name + strlen("reason_"));
}
if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons))
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 4ac434ad216f..a5019d1e741b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
g2h_fence->cancel = true;
g2h_fence->fail = true;
- g2h_fence->done = true;
+
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
}
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
@@ -1203,10 +1205,13 @@ retry_same_fence:
return ret;
}
- ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+ /* READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
+ * and g2h_fence_cancel.
+ */
+ ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
- if (g2h_fence.done) {
+ if (READ_ONCE(g2h_fence.done)) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- g2h_fence->done = true;
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
smp_mb();
wake_up_all(&ct->g2h_fence_wq);
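
The annotations above follow the usual pattern for a completion flag shared between a waiter and an interrupt/worker path; a self-contained sketch:

	struct demo {
		bool done;
		wait_queue_head_t wq;
	};

	static void demo_complete(struct demo *d)
	{
		WRITE_ONCE(d->done, true);	/* no compiler tearing or caching */
		smp_mb();			/* order the flag before the wakeup */
		wake_up_all(&d->wq);
	}

	static long demo_wait(struct demo *d)
	{
		/* 0 on timeout, otherwise the remaining jiffies */
		return wait_event_timeout(d->wq, READ_ONCE(d->done), HZ);
	}
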
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ed7be50b2f72..f6ba2b0f074d 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -717,26 +717,46 @@ static bool vf_recovery(struct xe_guc *guc)
return xe_gt_recovery_pending(guc_to_gt(guc));
}
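+/*
+ * relaxed_ms_sleep() - sleep for @delay_ms without demanding timer precision.
+ *
+ * Per Documentation/timers/timers-howto.rst, msleep() can overshoot badly
+ * for short delays due to jiffy rounding, so use usleep_range() with 500us
+ * of slack for delays up to 20ms and plain msleep() beyond that.
+ */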
+static inline void relaxed_ms_sleep(unsigned int delay_ms)
+{
+ unsigned long min_us, max_us;
+
+ if (!delay_ms)
+ return;
+
+ if (delay_ms > 20) {
+ msleep(delay_ms);
+ return;
+ }
+
+ min_us = mul_u32_u32(delay_ms, 1000);
+ max_us = min_us + 500;
+
+ usleep_range(min_us, max_us);
+}
+
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
- unsigned int sleep_period_ms = 1;
+ unsigned int sleep_period_ms = 1, sleep_total_ms = 0;
#define AVAILABLE_SPACE \
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
try_again:
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
- if (wqi_size > AVAILABLE_SPACE) {
- if (sleep_period_ms == 1024) {
+ if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
+ if (sleep_total_ms > 2000) {
xe_gt_reset_async(q->gt);
return -ENODEV;
}
msleep(sleep_period_ms);
- sleep_period_ms <<= 1;
+ sleep_total_ms += sleep_period_ms;
+ if (sleep_period_ms < 64)
+ sleep_period_ms <<= 1;
goto try_again;
}
}
@@ -1585,7 +1605,7 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
since_resume_ms;
if (wait_ms > 0 && q->guc->resume_time)
- msleep(wait_ms);
+ relaxed_ms_sleep(wait_ms);
set_exec_queue_suspended(q);
disable_scheduling(q, false);
@@ -2253,10 +2273,11 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job = NULL;
+ struct xe_sched_job *job = NULL, *__job;
bool restore_replay = false;
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ list_for_each_entry(__job, &sched->base.pending_list, drm.list) {
+ job = __job;
restore_replay |= job->restore_replay;
if (restore_replay) {
xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 2b3d49dd394c..495cdd4f948d 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -223,7 +223,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if (xe->heci_gsc.irq < 0)
return;
- ret = generic_handle_irq(xe->heci_gsc.irq);
+ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
@@ -243,7 +243,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if (xe->heci_gsc.irq < 0)
return;
- ret = generic_handle_irq(xe->heci_gsc.irq);
+ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2184af413b91..5a95b08a4723 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long sram_offset,
struct drm_pagemap_addr *sram_addr,
u64 vram_addr,
+ struct dma_fence *deps,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
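+ /* drm_sched_job_add_dependency() consumes the dma_fence_get() reference
+ * in both the success and failure cases; on failure, fall back to a
+ * synchronous wait so the ordering requirement still holds.
+ */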
+ if (deps && !dma_fence_is_signaled(deps)) {
+ dma_fence_get(deps);
+ err = drm_sched_job_add_dependency(&job->drm, deps);
+ if (err)
+ dma_fence_wait(deps, false);
+ err = 0;
+ }
+
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -2175,6 +2184,8 @@ err:
* @npages: Number of pages to migrate.
* @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from an array of dma addresses to a VRAM device physical address
*
@@ -2184,10 +2195,11 @@ err:
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr)
+ u64 dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
- XE_MIGRATE_COPY_TO_VRAM);
+ deps, XE_MIGRATE_COPY_TO_VRAM);
}
/**
@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
* @dst_addr: Array of DMA information (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from a VRAM device physical address to an array of dma addresses
*
@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr)
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
- XE_MIGRATE_COPY_TO_SRAM);
+ deps, XE_MIGRATE_COPY_TO_SRAM);
}
static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
&pagemap_addr[current_page],
- vram_addr, write ?
+ vram_addr, NULL, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 260e298e5dd7..b76441f062b4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr);
+ u64 dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr);
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 890c363282ae..f8bb28ab8124 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1105,11 +1105,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
- (OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl,
+ OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ (stream->periodic ?
OAG_OAGLBCTXCTRL_TIMER_ENABLE |
REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
- stream->period_exponent)) : 0);
+ stream->period_exponent) : 0));
/*
* Initialize Super Queue Internal Cnt Register
@@ -1254,6 +1255,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
+ if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
param->num_syncs = value;
return 0;
}
@@ -1343,7 +1347,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
ARRAY_SIZE(xe_oa_set_property_funcs_config));
if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
- XE_IOCTL_DBG(oa->xe, ext.pad))
+ XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
index e9a7615bb5c5..3da81af97b8b 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vfio.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -21,7 +21,7 @@ EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
{
if (!IS_SRIOV_PF(xe))
- return -EPERM;
+ return false;
return xe_sriov_pf_migration_supported(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e1..f97e0af6a9b0 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
static int xe_svm_copy(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages, const enum xe_svm_copy_dir dir)
+ unsigned long npages, const enum xe_svm_copy_dir dir,
+ struct dma_fence *pre_migrate_fence)
{
struct xe_vram_region *vr = NULL;
struct xe_gt *gt = NULL;
@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
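+ /* Jobs on the migrate queue execute in submission order, so only the
+ * first copy needs to carry the pre-migrate dependency.
+ */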
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
vram_addr, (u64)pagemap_addr[pos].addr, 1);
__fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
(u64)pagemap_addr[pos].addr, vram_addr, 1);
__fence = xe_migrate_to_vram(vr->migrate, 1,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -629,6 +634,8 @@ err_out:
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
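+ /* If no copy job consumed the dependency, honour it here with a
+ * synchronous wait.
+ */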
+ if (pre_migrate_fence)
+ dma_fence_wait(pre_migrate_fence, false);
/*
* XXX: We can't derive the GT here (or anywhere in this function, but
@@ -645,16 +652,20 @@ err_out:
static int xe_svm_copy_to_devmem(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+ pre_migrate_fence);
}
static int xe_svm_copy_to_ram(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+ pre_migrate_fence);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct xe_device *xe = xe_bo_device(bo);
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
xe_bo_put_async(bo);
xe_pm_runtime_put(xe);
}
@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms)
{
struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;
@@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
break;
}
+ /* Ensure that any clearing or async eviction will complete before migration. */
+ if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+ err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ &pre_migrate_fence);
+ if (err)
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ else if (pre_migrate_fence)
+ dma_fence_enable_sw_signaling(pre_migrate_fence);
+ }
+
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops, dpagemap, end - start);
+ &dpagemap_devmem_ops, dpagemap, end - start,
+ pre_migrate_fence);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (xe_svm_range_in_vram(range)) {
- drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 0955d2ac8d74..fa757dd07954 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -214,7 +214,7 @@ int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
- NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+ NULL, 0, 0, 0, NULL, NULL, 0);
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7cac646bdf1c..79ab6c512d3e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1508,7 +1508,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
- vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
+ if (flags & XE_VM_FLAG_FAULT_MODE)
+ vm->preempt.min_run_period_ms = 0;
+ else
+ vm->preempt.min_run_period_ms = 5;
for_each_tile(tile, xe, id)
xe_range_fence_tree_init(&vm->rftree[id]);
@@ -3324,6 +3327,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
+ if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ccd6cc090309..2168ef052499 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -263,7 +263,7 @@ struct xe_vm {
* @min_run_period_ms: The minimum run period before preempting
* an engine again
*/
- s64 min_run_period_ms;
+ unsigned int min_run_period_ms;
/** @exec_queues: list of exec queues attached to this VM */
struct list_head exec_queues;
/** @num_exec_queues: number exec queues attached to this VM */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 3764abca3d4f..e32dd2fde6f1 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -270,14 +270,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
- { XE_RTP_NAME("14020316580"),
- XE_RTP_RULES(MEDIA_VERSION(1301)),
- XE_RTP_ACTIONS(CLR(POWERGATE_ENABLE,
- VDN_HCP_POWERGATE_ENABLE(0) |
- VDN_MFXVDENC_POWERGATE_ENABLE(0) |
- VDN_HCP_POWERGATE_ENABLE(2) |
- VDN_MFXVDENC_POWERGATE_ENABLE(2))),
- },
{ XE_RTP_NAME("14019449301"),
XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index fb38eb3d6e9a..7ca7258eb5d8 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -76,3 +76,4 @@
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
16026007364 MEDIA_VERSION(3000)
+14020316580 MEDIA_VERSION(1301)