Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/Kconfig | 2
 drivers/gpu/drm/Makefile | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 12
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 38
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 24
 drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 16
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 8
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 12
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
 drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 41
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 3
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 31
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c | 4
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 11
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 10
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 13
 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4
 drivers/gpu/drm/amd/display/dc/bios/command_table.c | 44
 drivers/gpu/drm/amd/display/dc/bios/command_table.h | 4
 drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h | 2
 drivers/gpu/drm/amd/display/dc/dml/Makefile | 6
 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 545
 drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 28
 drivers/gpu/drm/amd/display/dc/link/link_detection.c | 10
 drivers/gpu/drm/amd/display/include/bios_parser_types.h | 2
 drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 31
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 33
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 7
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 12
 drivers/gpu/drm/bridge/synopsys/dw-dp.c | 20
 drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 9
 drivers/gpu/drm/drm_atomic_helper.c | 122
 drivers/gpu/drm/drm_fb_helper.c | 10
 drivers/gpu/drm/drm_gem_shmem_helper.c | 5
 drivers/gpu/drm/drm_gpuvm.c | 69
 drivers/gpu/drm/drm_pagemap.c | 19
 drivers/gpu/drm/exynos/exynos_hdmi.c | 2
 drivers/gpu/drm/gud/gud_pipe.c | 20
 drivers/gpu/drm/i915/display/intel_color_pipeline.c | 36
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37
 drivers/gpu/drm/i915/i915_gpu_error.c | 2
 drivers/gpu/drm/imagination/pvr_fw_trace.c | 8
 drivers/gpu/drm/imagination/pvr_gem.c | 11
 drivers/gpu/drm/mediatek/Kconfig | 2
 drivers/gpu/drm/mediatek/mtk_dpi.c | 23
 drivers/gpu/drm/mediatek/mtk_dsi.c | 6
 drivers/gpu/drm/mediatek/mtk_gem.c | 264
 drivers/gpu/drm/mediatek/mtk_gem.h | 33
 drivers/gpu/drm/mediatek/mtk_hdmi_common.c | 2
 drivers/gpu/drm/mediatek/mtk_hdmi_common.h | 2
 drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c | 58
 drivers/gpu/drm/mediatek/mtk_hdmi_v2.c | 7
 drivers/gpu/drm/mediatek/mtk_plane.c | 8
 drivers/gpu/drm/nouveau/dispnv50/atom.h | 13
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 1
 drivers/gpu/drm/nouveau/dispnv50/head.c | 5
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h | 95
 drivers/gpu/drm/nouveau/nouveau_display.c | 2
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 2
 drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c | 73
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 3
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 8
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 3
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 3
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 23
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 15
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 3
 drivers/gpu/drm/panel/panel-simple.c | 90
 drivers/gpu/drm/panthor/panthor_mmu.c | 10
 drivers/gpu/drm/pl111/pl111_drv.c | 2
 drivers/gpu/drm/radeon/pptable.h | 2
 drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 14
 drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 17
 drivers/gpu/drm/sysfb/drm_sysfb_helper.h | 9
 drivers/gpu/drm/tidss/tidss_kms.c | 30
 drivers/gpu/drm/vkms/vkms_colorop.c | 15
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 22
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 4
 drivers/gpu/drm/xe/Kconfig | 5
 drivers/gpu/drm/xe/xe_bo.c | 9
 drivers/gpu/drm/xe/xe_debugfs.c | 72
 drivers/gpu/drm/xe/xe_device_types.h | 18
 drivers/gpu/drm/xe/xe_exec_queue.c | 32
 drivers/gpu/drm/xe/xe_exec_queue.h | 1
 drivers/gpu/drm/xe/xe_exec_queue_types.h | 6
 drivers/gpu/drm/xe/xe_ggtt.c | 2
 drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 4
 drivers/gpu/drm/xe/xe_guc_ads.c | 14
 drivers/gpu/drm/xe/xe_guc_ads.h | 5
 drivers/gpu/drm/xe/xe_guc_ct.c | 14
 drivers/gpu/drm/xe/xe_late_bind_fw_types.h | 4
 drivers/gpu/drm/xe/xe_lrc.c | 3
 drivers/gpu/drm/xe/xe_migrate.c | 29
 drivers/gpu/drm/xe/xe_migrate.h | 6
 drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 2
 drivers/gpu/drm/xe/xe_svm.c | 51
 drivers/gpu/drm/xe/xe_vm.c | 7
 drivers/gpu/drm/xe/xe_vm.h | 2
 drivers/gpu/nova-core/Kconfig | 2
 drivers/gpu/nova-core/gsp/cmdq.rs | 14
 drivers/gpu/nova-core/gsp/fw.rs | 78
 drivers/gpu/nova-core/gsp/fw/r570_144.rs | 11
 drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs | 105
122 files changed, 1588 insertions, 1254 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7e6bc0b3a589..ed85d0ceee3b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,7 +210,7 @@ config DRM_GPUVM
config DRM_GPUSVM
tristate
- depends on DRM && DEVICE_PRIVATE
+ depends on DRM
select HMM_MIRROR
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0e1c668b46d2..d26191717428 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -108,8 +108,10 @@ obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
drm_gpusvm_helper-y := \
- drm_gpusvm.o\
+ drm_gpusvm.o
+drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \
drm_pagemap.o
+
obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f9774f58ce1..b20a06abb65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,8 @@ extern int amdgpu_rebar;
extern int amdgpu_wbrf;
extern int amdgpu_user_queue;
+extern uint amdgpu_hdmi_hpd_debounce_delay_ms;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 12201b8e99b3..d2c3885de711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3445,11 +3445,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD/VPE, it's handled specially */
+ /* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@@ -5064,6 +5063,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ /*
+ * device went through surprise hotplug; we need to destroy the topology
+ * before ip_fini_early to prevent kfd locking refcount issues caused by
+ * calling amdgpu_amdkfd_suspend()
+ */
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
@@ -5867,6 +5874,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
if (ret)
goto mode1_reset_failed;
+ /* enable mmio access after mode 1 reset completed */
+ adev->no_hw_access = false;
+
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b5d34797d606..52bc04452812 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1880,7 +1880,12 @@ int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb)
{
struct amdgpu_bo *abo;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_framebuffer *fb;
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ fb = plane->state->fb;
+ else
+ fb = plane->fb;
if (!fb)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e22cfa7c6d32..c1461317eb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -95,18 +95,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
attach->peer2peer = false;
- /*
- * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
- * Such buffers cannot be safely accessed over P2P due to device-local
- * compression metadata. Fallback to system-memory path instead.
- * Device supports GFX12 (GC 12.x or newer)
- * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
- *
- */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
- bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
- attach->peer2peer = false;
-
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 848e6b7db482..6ccb80e2d7c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -247,6 +247,7 @@ int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
int amdgpu_rebar = -1; /* auto */
int amdgpu_user_queue = -1;
+uint amdgpu_hdmi_hpd_debounce_delay_ms;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1123,6 +1124,16 @@ module_param_named(rebar, amdgpu_rebar, int, 0444);
MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+/*
+ * DOC: hdmi_hpd_debounce_delay_ms (uint)
+ * HDMI HPD disconnect debounce delay in milliseconds.
+ *
+ * Used to filter short disconnect->reconnect HPD toggles some HDMI sinks
+ * generate while entering/leaving power save. Defaults to 0 (disabled).
+ */
+MODULE_PARM_DESC(hdmi_hpd_debounce_delay_ms, "HDMI HPD disconnect debounce delay in milliseconds (0 = disabled (default), 1500 is a common value)");
+module_param_named(hdmi_hpd_debounce_delay_ms, amdgpu_hdmi_hpd_debounce_delay_ms, uint, 0644);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
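
A minimal, self-contained sketch of the moduleparam pattern used above, with hypothetical names (example_delay_ms is not a real amdgpu parameter); perm 0644 makes the value readable and writable via /sys/module/<module>/parameters/:

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int example_delay_ms;	/* 0 = feature disabled */
module_param_named(delay_ms, example_delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms, "Example delay in milliseconds (0 = off)");
MODULE_LICENSE("GPL");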
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c7843e336310..d78d9e7fb9d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -89,6 +89,16 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
+static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
+{
+ af->fence_wptr_start = af->ring->wptr;
+}
+
+static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
+{
+ af->fence_wptr_end = af->ring->wptr;
+}
+
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@@ -116,8 +126,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
+ amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_fence_wptr_end(af);
amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@@ -709,6 +721,7 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
+ bool reemitted = false;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@@ -726,7 +739,9 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
- if (fence == af)
+ if (fence->reemitted > 1)
+ reemitted = true;
+ else if (fence == af)
dma_fence_set_error(&fence->base, -ETIME);
else if (fence->context == af->context)
dma_fence_set_error(&fence->base, -ECANCELED);
@@ -734,9 +749,12 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
- /* signal the guilty fence */
- amdgpu_fence_write(ring, (u32)af->base.seqno);
- amdgpu_fence_process(ring);
+
+ if (reemitted) {
+ /* if we've already reemitted once then just cancel everything */
+ amdgpu_fence_driver_force_completion(af->ring);
+ af->ring->ring_backup_entries_to_copy = 0;
+ }
}
void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
@@ -745,7 +763,7 @@ void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
}
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
- u64 start_wptr, u32 end_wptr)
+ u64 start_wptr, u64 end_wptr)
{
unsigned int first_idx = start_wptr & ring->buf_mask;
unsigned int last_idx = end_wptr & ring->buf_mask;
@@ -784,10 +802,18 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
- if (!guilty_fence || (fence->context != guilty_fence->context))
+ if (!fence->reemitted &&
+ (!guilty_fence || (fence->context != guilty_fence->context))) {
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
+ } else if (!fence->reemitted) {
+ /* always save the fence */
+ amdgpu_ring_backup_unprocessed_command(ring,
+ fence->fence_wptr_start,
+ fence->fence_wptr_end);
+ }
wptr = fence->wptr;
+ fence->reemitted++;
}
rcu_read_unlock();
} while (last_seq != seq);
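
The backup path above copies the dwords between two saved write pointers. Since amdgpu ring buffers are power-of-two sized, positions are reduced to indices by masking with buf_mask; a standalone sketch of that wrap-around walk (assumed layout, not the driver's exact helper):

#include <stdint.h>

/* Caller provides an out buffer large enough for the span. */
static unsigned int ring_copy_span(const uint32_t *ring, uint32_t buf_mask,
				   uint64_t start_wptr, uint64_t end_wptr,
				   uint32_t *out)
{
	unsigned int idx = start_wptr & buf_mask;	/* first dword */
	unsigned int last = end_wptr & buf_mask;	/* one past the span */
	unsigned int n = 0;

	while (idx != last) {
		out[n++] = ring[idx];
		idx = (idx + 1) & buf_mask;		/* wrap at ring end */
	}
	return n;	/* number of dwords copied */
}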
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d2237ce9da70..1485f4789440 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -375,7 +375,7 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @start_page: first page to map in the GART aperture
* @num_pages: number of pages to be mapped
* @flags: page table entry flags
- * @dst: CPU address of the GART table
+ * @dst: valid CPU address of the GART table; must not be NULL
*
* Binds a BO that is allocated in VRAM to the GART page table
* (all ASICs).
@@ -396,7 +396,7 @@ void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
return;
for (i = 0; i < num_pages; ++i) {
- amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ amdgpu_gmc_set_pte_pde(adev, dst,
start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 869bceb0fe2c..7e623f91f2d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -732,6 +732,12 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
return 0;
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
+
+ if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) {
+ r = 0;
+ goto error_unlock_reset;
+ }
+
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 586a58facca1..72ec455fa932 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -302,7 +302,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job && job->vmid)
amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
amdgpu_ring_undo(ring);
- return r;
+ goto free_fence;
}
*f = &af->base;
/* get a ref for the job */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 37270c4dab8d..532f83d783d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -318,12 +318,36 @@ void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
+static int isp_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_resume)
+ return isp->funcs->hw_resume(isp);
+
+ return -ENODEV;
+}
+
+static int isp_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_suspend)
+ return isp->funcs->hw_suspend(isp);
+
+ return -ENODEV;
+}
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
.is_idle = isp_is_idle,
+ .suspend = isp_suspend,
+ .resume = isp_resume,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
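
The new suspend/resume hooks follow the optional-callback convention: the IP-block wrapper dispatches through the funcs table and returns -ENODEV when a backend leaves the hook unset. A reduced sketch of that shape, with simplified stand-in types:

#include <errno.h>

struct hw_ops {
	int (*hw_suspend)(void *ctx);	/* optional backend hook */
};

static int do_suspend(const struct hw_ops *ops, void *ctx)
{
	if (ops->hw_suspend)
		return ops->hw_suspend(ctx);

	return -ENODEV;			/* no backend registered */
}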
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index d6f4ffa4c97c..9a5d2b1dff9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -38,6 +38,8 @@ struct amdgpu_isp;
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
+ int (*hw_suspend)(struct amdgpu_isp *isp);
+ int (*hw_resume)(struct amdgpu_isp *isp);
};
struct amdgpu_isp {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0a0dcbf0798d..7ccb724b2488 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -217,8 +217,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner,
- drm_client_id);
+ r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
+ if (!r)
+ return 0;
+
+ kfree((*job)->hw_vm_fence);
err_fence:
kfree((*job)->hw_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6ee77f431d56..f65edd80cabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -201,6 +201,9 @@ static enum amd_ip_block_type
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
+ case AMDGPU_HW_IP_VPE:
+ type = AMD_IP_BLOCK_TYPE_VPE;
+ break;
default:
type = AMD_IP_BLOCK_TYPE_NUM;
break;
@@ -721,6 +724,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMD_IP_BLOCK_TYPE_UVD:
count = adev->uvd.num_uvd_inst;
break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ count = adev->vpe.num_instances;
+ break;
/* For all other IP block types not listed in the switch statement
* the ip status is valid here and the instance count is one.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7a27c6c4bb44..055437d4edf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -144,10 +144,15 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
ktime_t start_timestamp;
- /* wptr for the fence for resets */
+ /* wptr for the total submission for resets */
u64 wptr;
/* fence context for resets */
u64 context;
+ /* has this fence been reemitted */
+ unsigned int reemitted;
+ /* wptr for the fence for the submission */
+ u64 fence_wptr_start;
+ u64 fence_wptr_end;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 9a969175900e..58b26c78b642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -885,12 +885,28 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
return 0;
}
+bool amdgpu_userq_enabled(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ return true;
+ }
+
+ return false;
+}
+
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index c37444427a14..b48b3bc293fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -141,6 +141,7 @@ uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
struct drm_file *filp);
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+bool amdgpu_userq_enabled(struct drm_device *dev);
int amdgpu_userq_suspend(struct amdgpu_device *adev);
int amdgpu_userq_resume(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index eba9fb359047..85e9edc1cb6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -141,6 +141,8 @@ static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
+ dma_fence_put(userq->last_fence);
+
amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
xa_destroy(&userq->fence_drv_xa);
/* Drop the fence_drv reference held by user queue */
@@ -471,6 +473,9 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_exec exec;
u64 wptr;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
size_mul(sizeof(u32), num_syncobj_handles));
@@ -653,6 +658,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
int r, i, rentry, wentry, cnt;
struct drm_exec exec;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
size_mul(sizeof(u32), num_read_bo_handles));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c362d4dfb5bb..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1069,9 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked &&
- /* SI doesn't support pasid or KIQ/MES */
- params->adev->family > AMDGPU_FAMILY_SI) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index d01d2712cf57..b786967022d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -278,7 +278,6 @@ static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val);
static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
@@ -4634,16 +4633,6 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
- bool start,
- bool secure)
-{
- uint32_t v = secure ? FRAME_TMZ : 0;
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
-}
-
static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs)
{
@@ -5520,7 +5509,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
.preempt_ib = gfx_v12_0_ring_preempt_ib,
- .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8ad7519f7b58..f1ee3921d970 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1235,16 +1235,16 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_WC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
break;
case AMDGPU_VM_MTYPE_RW:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
break;
case AMDGPU_VM_MTYPE_CC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
break;
case AMDGPU_VM_MTYPE_UC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
break;
}
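
The WC/RW/CC/UC cases above were OR-ing the macro's result back into *flags. AMDGPU_PTE_MTYPE_VG10() is a clear-then-set field helper, so its result must be assigned, not OR-ed; a sketch of the bug class, with an assumed macro shape:

#include <stdint.h>

#define MTYPE_SHIFT	48
#define MTYPE_MASK	(0x7ULL << MTYPE_SHIFT)
/* clear the field, then insert the new value */
#define SET_MTYPE(f, m)	(((f) & ~MTYPE_MASK) | ((uint64_t)(m) << MTYPE_SHIFT))

static uint64_t update_pte(uint64_t flags)
{
	flags = SET_MTYPE(flags, 1);	/* correct: field == 1 */
	/* flags |= SET_MTYPE(flags, 1); keeps stale field bits OR-ed in */
	return flags;
}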
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4258d3e0b706..0002bcc6c4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -26,6 +26,7 @@
*/
#include <linux/gpio/machine.h>
+#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@@ -145,6 +146,9 @@ static int isp_genpd_add_device(struct device *dev, void *data)
return -ENODEV;
}
+ /* The devices will be managed by the pm ops from the parent */
+ dev_pm_syscore_device(dev, true);
+
exit:
/* Continue to add */
return 0;
@@ -177,12 +181,47 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
return -ENODEV;
}
+ dev_pm_syscore_device(dev, false);
exit:
/* Continue to remove */
return 0;
}
+static int isp_suspend_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int isp_resume_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int isp_v4_1_1_hw_suspend(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_suspend_device);
+ if (r)
+ dev_err(isp->parent, "failed to suspend hw devices (%d)\n", r);
+
+ return r;
+}
+
+static int isp_v4_1_1_hw_resume(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_resume_device);
+ if (r)
+ dev_err(isp->parent, "failed to resume hw devices (%d)\n", r);
+
+ return r;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
const struct software_node *amd_camera_node, *isp4_node;
@@ -369,6 +408,8 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
static const struct isp_funcs isp_v4_1_1_funcs = {
.hw_init = isp_v4_1_1_hw_init,
.hw_fini = isp_v4_1_1_hw_fini,
+ .hw_suspend = isp_v4_1_1_hw_suspend,
+ .hw_resume = isp_v4_1_1_hw_resume,
};
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
index 27aa1a5b120f..fbb751821c69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -120,8 +120,7 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
&& dev->kfd->mec2_fw_version < 0x1b6) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
&& dev->kfd->mec2_fw_version < 0x30) ||
- (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
- KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
+ kfd_dbg_has_cwsr_workaround(dev))
return false;
/* Assume debugging and cooperative launch supported otherwise. */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d7a2e7178ea9..625ea8ab7a74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,14 +1209,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes)
pdd->last_evict_timestamp = get_jiffies_64();
- retval = suspend_all_queues_mes(dqm);
- if (retval) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
- }
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1246,10 +1240,6 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
- } else {
- retval = resume_all_queues_mes(dqm);
- if (retval)
- dev_err(dev, "Resuming all queues failed");
}
out:
@@ -2919,6 +2909,14 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
+ struct kfd_mem_obj *mqd)
+{
+ WARN(!mqd, "No hiq sdma mqd trunk to free");
+
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+}
+
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@@ -3042,19 +3040,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
return dqm;
}
+ if (!dev->kfd->shared_resources.enable_mes)
+ deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
+
out_free:
kfree(dqm);
return NULL;
}
-static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
- struct kfd_mem_obj *mqd)
-{
- WARN(!mqd, "No hiq sdma mqd trunk to free");
-
- amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
-}
-
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.stop(dqm);
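
The change above releases the HIQ/SDMA MQD allocated earlier when a later init step fails, instead of leaking it on the out_free path. A generic sketch of that unwinding order, with hypothetical names:

#include <stdlib.h>

struct mgr { void *mqd; };

static struct mgr *mgr_init(int late_step_fails)
{
	struct mgr *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->mqd = malloc(64);		/* earlier allocation */
	if (!m->mqd)
		goto out_free;
	if (late_step_fails) {
		free(m->mqd);		/* undo the earlier allocation */
		goto out_free;
	}
	return m;

out_free:
	free(m);
	return NULL;
}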
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index af53e796ea1b..6ada7b4af7c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page, 0);
+ zone_device_page_init(page, page_pgmap(page), 0);
}
static void
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 740711ac1037..1ea5a250440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5266,6 +5266,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
+ int real_brightness;
+ int init_brightness;
if (aconnector->bl_idx == -1)
return;
@@ -5290,6 +5292,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
+ init_brightness = props.brightness;
+
if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
props.scale = BACKLIGHT_SCALE_NON_LINEAR;
@@ -5308,8 +5312,20 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
- } else
+ } else {
+ /*
+ * dm->brightness[x] can be inconsistent just after startup until
+ * ops.get_brightness is called.
+ */
+ real_brightness =
+ amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);
+
+ if (real_brightness != init_brightness) {
+ dm->actual_brightness[aconnector->bl_idx] = real_brightness;
+ dm->brightness[aconnector->bl_idx] = real_brightness;
+ }
drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
+ }
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5626,7 +5642,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
- drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ aconnector->base.name,
link->psr_settings.psr_feature_enabled,
link->psr_settings.psr_version,
link->dpcd_caps.psr_info.psr_version,
@@ -8930,9 +8947,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
- aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
- INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
- aconnector->hdmi_prev_sink = NULL;
+ /*
+ * If an HDMI HPD debounce delay is set, use the minimum of the selected
+ * value and AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS
+ */
+ if (amdgpu_hdmi_hpd_debounce_delay_ms) {
+ aconnector->hdmi_hpd_debounce_delay_ms = min(amdgpu_hdmi_hpd_debounce_delay_ms,
+ AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS);
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+ } else {
+ aconnector->hdmi_hpd_debounce_delay_ms = 0;
+ }
/*
* configure support HPD hot plug connector_>polled default value is 0
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index bd0403005f37..beb0d04d3e68 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,7 +59,10 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
-#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
+/*
+ * Maximum HDMI HPD debounce delay in milliseconds
+ */
+#define AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS 5000
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
index d585618b8064..a2de3bba8346 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -79,7 +79,6 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
list->type = ops[i]->base.id;
- list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
i++;
@@ -197,6 +196,9 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id);
+
return 0;
cleanup:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 697e232acebf..9fcd72d87d25 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -248,8 +248,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
- struct amdgpu_device *adev = drm_to_adev(dm->ddev);
- int r;
mutex_lock(&dm->dc_lock);
@@ -279,16 +277,7 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
-
- r = amdgpu_dpm_pause_power_profile(adev, true);
- if (r)
- dev_warn(adev->dev, "failed to set default power profile mode\n");
-
dc_allow_idle_optimizations(dm->dc, true);
-
- r = amdgpu_dpm_pause_power_profile(adev, false);
- if (r)
- dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 0a2a3f233a0e..e7b0928bd3db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -915,13 +915,19 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
- use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
-
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ /*
+ * Unlike other connector types that don't support HPD, analog
+ * connectors may be hot-plugged. Only poll analog connectors.
+ */
+ use_polling |=
+ amdgpu_dm_connector->dc_link &&
+ dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id);
+
dc_link = amdgpu_dm_connector->dc_link;
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 2e3ee78999d9..7c4496fb4b9d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1790,12 +1790,13 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
static int
dm_plane_init_colorops(struct drm_plane *plane)
{
- struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {};
struct drm_device *dev = plane->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dc *dc = adev->dm.dc;
int len = 0;
- int ret;
+ int ret = 0;
+ int i;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
@@ -1806,7 +1807,7 @@ dm_plane_init_colorops(struct drm_plane *plane)
if (ret) {
drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
plane->base.id, ret);
- return ret;
+ goto out;
}
len++;
@@ -1814,7 +1815,11 @@ dm_plane_init_colorops(struct drm_plane *plane)
drm_plane_create_color_pipeline_property(plane, pipelines, len);
}
- return 0;
+out:
+ for (i = 0; i < len; i++)
+ kfree(pipelines[i].name);
+
+ return ret;
}
#endif
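
After this change, dm_plane_init_colorops() frees every pipelines[i].name on both the success and the error path, which is safe presumably because the DRM enum-property creation copies the strings it needs. A reduced sketch of that single-exit ownership pattern, with hypothetical names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct entry { char *name; };

static int build_list(struct entry *list, int n)
{
	int len, ret = 0, i;

	for (len = 0; len < n; len++) {
		list[len].name = strdup("pipeline");
		if (!list[len].name) {
			ret = -ENOMEM;
			goto out;
		}
	}
	/* consumer copies the names, so release them on every path */
out:
	for (i = 0; i < len; i++)
		free(list[i].name);
	return ret;
}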
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index d1471f34e419..9f11e6ca4051 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -763,14 +763,14 @@ static enum bp_result bios_parser_encoder_control(
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac1_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
} else if (cntl->engine_id == ENGINE_ID_DACB) {
if (!bp->cmd_tbl.dac2_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac2_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 22457f417e65..76a3559f0ddc 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -1797,7 +1797,30 @@ static enum bp_result select_crtc_source_v3(
&params.ucEncodeMode))
return BP_RESULT_BADINPUT;
- params.ucDstBpc = bp_params->bit_depth;
+ switch (bp_params->color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ params.ucDstBpc = PANEL_BPC_UNDEFINE;
+ break;
+ case COLOR_DEPTH_666:
+ params.ucDstBpc = PANEL_6BIT_PER_COLOR;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ params.ucDstBpc = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucDstBpc = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucDstBpc = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_141414:
+ dm_error("14-bit color not supported by SelectCRTC_Source v3\n");
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucDstBpc = PANEL_16BIT_PER_COLOR;
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
result = BP_RESULT_OK;
@@ -1815,12 +1838,12 @@ static enum bp_result select_crtc_source_v3(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
@@ -1846,12 +1869,15 @@ static void init_dac_encoder_control(struct bios_parser *bp)
static void dac_encoder_control_prepare_params(
DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
params->ucDacStandard = dac_standard;
- if (enable)
+ if (action == ENCODER_CONTROL_SETUP ||
+ action == ENCODER_CONTROL_INIT)
+ params->ucAction = ATOM_ENCODER_INIT;
+ else if (action == ENCODER_CONTROL_ENABLE)
params->ucAction = ATOM_ENABLE;
else
params->ucAction = ATOM_DISABLE;
@@ -1864,7 +1890,7 @@ static void dac_encoder_control_prepare_params(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1873,7 +1899,7 @@ static enum bp_result dac1_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
@@ -1885,7 +1911,7 @@ static enum bp_result dac1_encoder_control_v1(
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1894,7 +1920,7 @@ static enum bp_result dac2_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index e89b1ba0048b..78bdbcaa61c8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -57,12 +57,12 @@ struct cmd_tbl {
struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac2_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac1_output_control)(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
index b015e80672ec..fcd3ab4b0045 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
@@ -41,7 +41,7 @@
/* kHZ*/
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
/* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 340000
struct dp_hdmi_dongle_signature_data {
int8_t id[15];/* "DP-HDMI ADAPTOR"*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index b357683b4255..268b5fbdb48b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -30,7 +30,11 @@ dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- frame_warn_limit := 3072
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
else
frame_warn_limit := 2048
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 8d24763938ea..1df3412be346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -77,32 +77,14 @@ static unsigned int dscceComputeDelay(
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
-// Super monster function with some 45 argument
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -116,7 +98,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -124,9 +105,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -135,14 +113,7 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix);
+ bool *NotEnoughTimeForDynamicMetadata);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
@@ -294,62 +265,23 @@ static void CalculateDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ enum clock_change_support *DRAMClockChangeSupport);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
@@ -810,29 +742,12 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -846,7 +761,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -854,9 +768,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -865,15 +776,10 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix)
+ bool *NotEnoughTimeForDynamicMetadata)
{
+ struct vba_vars_st *v = &mode_lib->vba;
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
@@ -905,26 +811,26 @@ static bool CalculatePrefetchSchedule(
double Tdmec = 0;
double Tdmsks = 0;
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMInefficiencyFactor = v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
- ProgressiveToInterlaceUnitInOPP,
+ v->ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
@@ -932,16 +838,16 @@ static bool CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
- *Tdmdl = TWait + Tvm_trips + trip_to_mem;
+ if (v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true) {
+ v->Tdmdl[k] = TWait + Tvm_trips + trip_to_mem;
} else {
- *Tdmdl = TWait + UrgentExtraLatency;
+ v->Tdmdl[k] = TWait + UrgentExtraLatency;
}
- if (DynamicMetadataEnable == true) {
- if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
+ if (v->DynamicMetadataEnable[k] == true) {
+ if (VStartup * LineTime < Tsetup + v->Tdmdl[k] + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
} else {
*NotEnoughTimeForDynamicMetadata = false;
@@ -949,39 +855,39 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ v->Tdmdl_vm[k] = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
- *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ v->DSTXAfterScaler[k] = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
- *DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
+ v->DSTXAfterScaler[k] = v->DSTXAfterScaler[k] + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
- *DSTYAfterScaler = 1;
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && v->ProgressiveToInterlaceUnitInOPP))
+ v->DSTYAfterScaler[k] = 1;
else
- *DSTYAfterScaler = 0;
+ v->DSTYAfterScaler[k] = 0;
- DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = v->DSTYAfterScaler[k] * myPipe->HTotal + v->DSTXAfterScaler[k];
+ v->DSTYAfterScaler[k] = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ v->DSTXAfterScaler[k] = DSTTotalPixelsAfterScaler - ((double) (v->DSTYAfterScaler[k] * myPipe->HTotal));
MyError = false;
@@ -990,33 +896,33 @@ static bool CalculatePrefetchSchedule(
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
- if (GPUVMEnable) {
- if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
+ if (v->GPUVMEnable) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
+ v->Tno_bw[k] = UrgentExtraLatency + trip_to_mem * ((v->GPUVMMaxPageTableLevels - 2) - 1);
} else
- *Tno_bw = 0;
+ v->Tno_bw[k] = 0;
} else if (!myPipe->DCCEnable)
- *Tno_bw = LineTime;
+ v->Tno_bw[k] = LineTime;
else
- *Tno_bw = LineTime / 4;
+ v->Tno_bw[k] = LineTime / 4;
- dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, v->Tdmdl[k])) / LineTime
+ - (v->DSTYAfterScaler[k] + v->DSTXAfterScaler[k] / myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
- prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
+ prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k]) / Tsw_oto;
- if (GPUVMEnable == true) {
- Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ if (v->GPUVMEnable == true) {
+ Tvm_oto = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
@@ -1042,10 +948,10 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
- dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
- dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
+ dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", v->Tdmdl_vm[k]);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
+ dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", v->DSTXAfterScaler[k]);
+ dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)v->DSTYAfterScaler[k]);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -1059,26 +965,26 @@ static bool CalculatePrefetchSchedule(
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
- if (Tpre_rounded - *Tno_bw > 0)
+ if (Tpre_rounded - v->Tno_bw[k] > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
- + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
- / (Tpre_rounded - *Tno_bw);
+ + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
+ / (Tpre_rounded - v->Tno_bw[k]);
else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
- PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
+ if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]) > 0) {
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]);
}
- if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ if (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
- BytePerPixelC) /
- (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
+ v->BytePerPixelC[k]) /
+ (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
@@ -1086,7 +992,7 @@ static bool CalculatePrefetchSchedule(
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
- swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
+ swath_width_chroma_ub * v->BytePerPixelC[k]) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
@@ -1096,7 +1002,7 @@ static bool CalculatePrefetchSchedule(
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
- PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
+ PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
@@ -1107,7 +1013,7 @@ static bool CalculatePrefetchSchedule(
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
@@ -1118,7 +1024,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth2 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
@@ -1129,7 +1035,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth3 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
@@ -1152,13 +1058,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable) {
- Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
+ if (v->GPUVMEnable) {
+ Tvm_equ = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable || myPipe->DCCEnable)) {
+ if ((v->GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -1227,7 +1133,7 @@ static bool CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
- *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
+ *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * v->BytePerPixelC[k] * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
@@ -1243,9 +1149,9 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
- dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
+ dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
@@ -1276,7 +1182,7 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
- *prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
+ v->prefetch_vmrow_bw[k] = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
@@ -2437,30 +2343,12 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelay[k],
- v->DPPCLKDelaySubtotal
- + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
@@ -2474,7 +2362,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
- v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
@@ -2482,9 +2369,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
@@ -2493,14 +2377,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
- &v->NotEnoughTimeForDynamicMetadata[k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NotEnoughTimeForDynamicMetadata[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
@@ -2730,62 +2607,23 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
- v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &DRAMClockChangeSupport);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -4770,29 +4608,12 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelayPerState[i][k],
- v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
@@ -4806,7 +4627,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
- v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
@@ -4814,9 +4634,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
@@ -4825,14 +4642,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &v->NoTimeForDynamicMetadata[i][j][k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NoTimeForDynamicMetadata[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5007,62 +4817,23 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
- v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->DRAMClockChangeSupport[i][j]);
}
}
@@ -5179,63 +4950,25 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ enum clock_change_support *DRAMClockChangeSupport)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
@@ -5254,101 +4987,101 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
- mode_lib->vba.TotalActiveDPP = 0;
- mode_lib->vba.TotalDCCActiveDPP = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
- if (DCCEnable[k] == true) {
- mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
+ v->TotalActiveDPP = 0;
+ v->TotalDCCActiveDPP = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalActiveDPP = v->TotalActiveDPP + DPPPerPlane[k];
+ if (v->DCCEnable[k] == true) {
+ v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + DPPPerPlane[k];
}
}
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->UrgentWatermark;
- mode_lib->vba.TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
- mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ v->TotalActiveWriteback = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+ v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
- mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ v->LBLatencyHidingSourceLinesY = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
- mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ v->LBLatencyHidingSourceLinesC = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
- LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
- ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
+ if (v->WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
- if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
- WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
+ WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
- mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
- mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
+ v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5356,40 +5089,40 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->FinalDRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
- SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
- mode_lib->vba.TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
- mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
+ v->TotalNumberOfActiveOTG = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (v->MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
+ } else if (((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
- TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
}
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
+ v->StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ v->StutterEnterPlusExitWatermark = dml_max(v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}
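
The hunks above replace the long per-plane parameter lists of CalculatePrefetchSchedule and CalculateWatermarksAndDRAMSpeedChangeSupport with a plane index k plus direct reads and writes through mode_lib->vba. The shape of the refactor, as a minimal sketch (ExampleCalc and its body are illustrative, not DML code):

static void ExampleCalc(struct display_mode_lib *mode_lib, unsigned int k)
{
	/* One local alias replaces dozens of scalar parameters. */
	struct vba_vars_st *v = &mode_lib->vba;

	/* Per-plane inputs are read straight from the shared state... */
	double line_time = v->HTotal[k] / v->PixelClock[k];

	/*
	 * ...and per-plane results are written back into it, so the caller
	 * no longer threads &v->Tdmdl[k] and friends through as output
	 * pointers. The assignment here is arbitrary, for illustration.
	 */
	v->Tdmdl[k] = line_time / 4.0;
}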
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 0cdd8c74abdf..ebd74b43e935 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1610,38 +1610,12 @@ dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
struct dc_bios *bios = link->ctx->dc_bios;
struct bp_crtc_source_select crtc_source_select = {0};
enum engine_id engine_id = link->link_enc->preferred_engine;
- uint8_t bit_depth;
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
engine_id = link->link_enc->analog_engine;
- switch (pipe_ctx->stream->timing.display_color_depth) {
- case COLOR_DEPTH_UNDEFINED:
- bit_depth = 0;
- break;
- case COLOR_DEPTH_666:
- bit_depth = 6;
- break;
- default:
- case COLOR_DEPTH_888:
- bit_depth = 8;
- break;
- case COLOR_DEPTH_101010:
- bit_depth = 10;
- break;
- case COLOR_DEPTH_121212:
- bit_depth = 12;
- break;
- case COLOR_DEPTH_141414:
- bit_depth = 14;
- break;
- case COLOR_DEPTH_161616:
- bit_depth = 16;
- break;
- }
-
crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
- crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
crtc_source_select.engine_id = engine_id;
crtc_source_select.sink_signal = pipe_ctx->stream->signal;
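
With bp_crtc_source_select now carrying enum dc_color_depth directly, the bit-depth translation deleted above presumably moves behind the BIOS command-table interface. A hedged sketch of that translation, reproducing the deleted mapping (color_depth_to_bit_depth is a hypothetical name):

static uint8_t color_depth_to_bit_depth(enum dc_color_depth depth)
{
	switch (depth) {
	case COLOR_DEPTH_UNDEFINED:
		return 0;
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	case COLOR_DEPTH_888:
	default:
		return 8;
	}
}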
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 6d31f4967f1a..7fa6bc97a919 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -336,7 +336,7 @@ static void query_dp_dual_mode_adaptor(
/* Assume we have no valid DP passive dongle connected */
*dongle = DISPLAY_DONGLE_NONE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
if (!i2c_read(
@@ -392,6 +392,8 @@ static void query_dp_dual_mode_adaptor(
}
}
+ if (is_valid_hdmi_signature)
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
if (is_type2_dongle) {
uint32_t max_tmds_clk =
@@ -932,7 +934,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum engine_id engine_id = link_enc->preferred_engine;
enum dal_device_type device_type = DEVICE_TYPE_CRT;
- enum bp_result bp_result;
+ enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t enum_id;
switch (engine_id) {
@@ -946,7 +948,9 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
break;
}
- bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ if (bios->funcs->dac_load_detection)
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+
return bp_result == BP_RESULT_OK;
}
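
Two defensive changes above: the passive-dongle TMDS limit now starts at the DVI maximum and is raised to the HDMI safe maximum only once a valid HDMI signature has been read, and the optional dac_load_detection hook is called only when the BIOS provides it. The guarded-hook shape in isolation (example_dac_load_detect is an illustrative wrapper, not a DC function):

static bool example_dac_load_detect(struct dc_bios *bios,
				    enum engine_id engine_id,
				    enum dal_device_type device_type,
				    uint32_t enum_id)
{
	/* Default to "unsupported" so a missing hook reads as no load. */
	enum bp_result res = BP_RESULT_UNSUPPORTED;

	if (bios->funcs->dac_load_detection)
		res = bios->funcs->dac_load_detection(bios, engine_id,
						      device_type, enum_id);

	return res == BP_RESULT_OK;
}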
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 973b6bdbac63..f40dc612ec73 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -136,7 +136,7 @@ struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
enum signal_type sink_signal;
- uint8_t bit_depth;
+ enum dc_color_depth color_depth;
};
struct bp_transmitter_control {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 1f539cc65f41..695432d3045f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -2273,8 +2273,6 @@ static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
if (scaling_factor == 0)
return -EINVAL;
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
ret = si_calculate_adjusted_tdp_limits(adev,
false, /* ??? */
adev->pm.dpm.tdp_adjustment,
@@ -2283,6 +2281,12 @@ static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
if (ret)
return ret;
+ if (adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87) {
+ /* Work around buggy powertune on Radeon 430 and 520. */
+ tdp_limit = 32;
+ near_tdp_limit = 28;
+ }
+
smc_table->dpm2Params.TDPLimit =
cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
smc_table->dpm2Params.NearTDPLimit =
@@ -2328,16 +2332,8 @@ static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
if (ni_pi->enable_power_containment) {
SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
- u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
int ret;
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
- smc_table->dpm2Params.NearTDPLimit =
- cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
- smc_table->dpm2Params.SafePowerLimit =
- cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
ret = amdgpu_si_copy_bytes_to_smc(adev,
(si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
@@ -3473,10 +3469,15 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
+ (adev->pdev->revision == 0x87 &&
+ adev->pdev->device != 0x6611) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605)) {
max_sclk = 75000;
+ } else if (adev->pdev->revision == 0x87 &&
+ adev->pdev->device == 0x6611) {
+ /* Radeon 430 and 520 */
+ max_sclk = 78000;
}
}
@@ -7600,12 +7601,12 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
- WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+ WREG32(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
- WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+ WREG32(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7617,12 +7618,12 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
- WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+ WREG32(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
- WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+ WREG32(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
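
Both quirk sites above key off the same PCI identity (device 0x6611, revision 0x87). A predicate like the following hypothetical sketch would keep the TDP and SCLK paths from drifting apart:

static bool si_is_radeon_430_or_520(struct amdgpu_device *adev)
{
	/* PCI identity shared by the two quirk sites above. */
	return adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87;
}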
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7c9f77124ab2..c4966dcc6875 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2455,24 +2455,21 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
- pptable->PcieLaneCount[i] > pcie_width_cap) {
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
- pptable->PcieGenSpeed[i] > pcie_gen_cap ?
- pcie_gen_cap : pptable->PcieGenSpeed[i];
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
- pptable->PcieLaneCount[i] > pcie_width_cap ?
- pcie_width_cap : pptable->PcieLaneCount[i];
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_gen_cap << 8;
- smu_pcie_arg |= pcie_width_cap;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- break;
- }
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_gen[i] << 8;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_lane[i];
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
}
return ret;
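
After this change every link level is programmed: the pptable speed and lane count are clamped to the platform caps, cached in the DPM table, and the clamped values (rather than the raw caps, as before) are encoded into the override message. The encoding in isolation (example_pcie_override_arg is an illustrative name):

static uint32_t example_pcie_override_arg(uint32_t level, uint16_t gen,
					  uint16_t lanes, uint16_t gen_cap,
					  uint16_t width_cap)
{
	uint16_t g = gen > gen_cap ? gen_cap : gen;
	uint16_t l = lanes > width_cap ? width_cap : lanes;

	/* level in bits 31:16, clamped gen in 15:8, clamped lanes in 7:0 */
	return (level << 16) | ((uint32_t)g << 8) | l;
}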
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 677781060246..eaeff6a9bc50 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2923,8 +2923,13 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
break;
}
- if (!ret)
+ if (!ret) {
+ /* disable MMIO access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+ }
return ret;
}
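
Setting no_hw_access before the barrier guarantees the flag is visible to other CPUs before the reset window opens; register accessors are then expected to bail out while it is set. A sketch of such a reader, as an assumption about the accessor side (the actual amdgpu accessors may differ):

static inline bool example_hw_accessible(struct amdgpu_device *adev)
{
	/* A read barrier here would pair with the smp_mb() above. */
	smp_rmb();
	return !READ_ONCE(adev->no_hw_access);
}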
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 2cea688c604f..d7642d388bc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1702,8 +1702,9 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
+ int16_t od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ uint32_t power_limit;
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2143,10 +2144,15 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
if (!ret) {
- if (amdgpu_emu_mode == 1)
+ if (amdgpu_emu_mode == 1) {
msleep(50000);
- else
+ } else {
+ /* disable MMIO access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(1000);
+ }
}
return ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
index 82aaf74e1bc0..432342452484 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-dp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
@@ -2062,33 +2062,41 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
}
ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret)
+ if (ret) {
dev_err_probe(dev, ret, "Failed to attach bridge\n");
+ goto unregister_aux;
+ }
dw_dp_init_hw(dp);
ret = phy_init(dp->phy);
if (ret) {
dev_err_probe(dev, ret, "phy init failed\n");
- return ERR_PTR(ret);
+ goto unregister_aux;
}
ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
if (ret)
- return ERR_PTR(ret);
+ goto unregister_aux;
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0)
- return ERR_PTR(ret);
+ if (dp->irq < 0) {
+ ret = dp->irq;
+ goto unregister_aux;
+ }
ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
IRQF_ONESHOT, dev_name(dev), dp);
if (ret) {
dev_err_probe(dev, ret, "failed to request irq\n");
- return ERR_PTR(ret);
+ goto unregister_aux;
}
return dp;
+
+unregister_aux:
+ drm_dp_aux_unregister(&dp->aux);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dw_dp_bind);
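
The bind path above now funnels every failure after the AUX channel is registered through a single unwind label instead of returning directly and leaking it. The idiom in miniature, with placeholder helpers (all example_* names are assumptions):

static int example_bind(struct device *dev)
{
	int ret;

	ret = example_register_aux(dev);	/* hypothetical first resource */
	if (ret)
		return ret;			/* nothing to unwind yet */

	ret = example_request_irq(dev);		/* hypothetical later step */
	if (ret)
		goto unregister_aux;		/* unwind in reverse order */

	return 0;

unregister_aux:
	example_unregister_aux(dev);
	return ret;
}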
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index fe4c026280f0..60166919c5b5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -163,6 +163,7 @@ struct dw_hdmi_qp {
unsigned long ref_clk_rate;
struct regmap *regm;
+ int main_irq;
unsigned long tmds_char_rate;
};
@@ -1271,6 +1272,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
dw_hdmi_qp_init_hw(hdmi);
+ hdmi->main_irq = plat_data->main_irq;
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
dw_hdmi_qp_main_hardirq, NULL,
IRQF_SHARED, dev_name(dev), hdmi);
@@ -1331,9 +1333,16 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi)
+{
+ disable_irq(hdmi->main_irq);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_qp_suspend);
+
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_init_hw(hdmi);
+ enable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);
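
A platform glue driver would presumably wire the new suspend hook into its PM ops next to the existing resume hook. A hedged sketch under assumed names (my_hdmi_glue and the wrappers are illustrative; only dw_hdmi_qp_suspend()/dw_hdmi_qp_resume() come from the code above):

struct my_hdmi_glue {
	struct dw_hdmi_qp *hdmi;
};

static int my_glue_suspend(struct device *dev)
{
	struct my_hdmi_glue *glue = dev_get_drvdata(dev);

	/* Masks the main IRQ so no handler runs against powered-down HW. */
	dw_hdmi_qp_suspend(dev, glue->hdmi);
	return 0;
}

static int my_glue_resume(struct device *dev)
{
	struct my_hdmi_glue *glue = dev_get_drvdata(dev);

	/* Re-initializes the HW, then re-enables the main IRQ. */
	dw_hdmi_qp_resume(dev, glue->hdmi);
	return 0;
}

static const struct dev_pm_ops my_glue_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(my_glue_suspend, my_glue_resume)
};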
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 10adac9397cf..5beea645035f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1162,8 +1162,18 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
new_state->self_refresh_active;
}
-static void
-encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and, if the CRTC needs
+ * it, disables the full bridge chain and then disables the encoder
+ * afterwards.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1229,9 +1239,18 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
-static void
-crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_disable - disable CRTCs
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all CRTCs in the current state and if the CRTC needs
+ * it, disables it.
+ */
+void
+drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1282,9 +1301,18 @@ crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
drm_crtc_vblank_put(crtc);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
-static void
-encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the CRTC needs
+ * it, post-disables all encoder bridges.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1335,15 +1363,16 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
- encoder_bridge_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
- crtc_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
- encoder_bridge_post_disable(dev, state);
+ drm_atomic_helper_commit_crtc_disable(dev, state);
}
/**
@@ -1446,8 +1475,17 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
-static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_set_mode - set the new mode
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all CRTCs in the current state and, if the mode has
+ * changed, changes the mode of the CRTC, then calls down the bridge
+ * chain and changes the mode in all bridges as well.
+ */
+void
+drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1508,6 +1546,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
@@ -1531,12 +1570,21 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, state);
+ drm_atomic_helper_commit_crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
-static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_writebacks - issue writebacks
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors, checks if the new state requires
+ * a writeback job to be issued and in that case issues an atomic
+ * commit on each connector.
+ */
+void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1555,9 +1603,18 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
-static void
-encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors and if the CRTC needs it, pre-enables
+ * the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1588,9 +1645,18 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
-static void
-crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_enable - enables the CRTCs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over CRTCs in the new state, and if the CRTC needs
+ * it, enables it.
+ */
+void
+drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1619,9 +1685,18 @@ crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
-static void
-encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over all connectors in the new state, and if the CRTC needs
+ * it, enables the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1664,6 +1739,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
@@ -1682,11 +1758,11 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *state)
{
- encoder_bridge_pre_enable(dev, state);
+ drm_atomic_helper_commit_crtc_enable(dev, state);
- crtc_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
- encoder_bridge_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
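
With the disable and enable phases exported, a driver that needs its own commit_tail can presumably compose them directly instead of calling the stock drm_atomic_helper_commit_modeset_disables()/_enables(). A hedged sketch mirroring the ordering above (my_commit_tail is an illustrative name):

static void my_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* Disable phase, in the order used by the patched helpers. */
	drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
	drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
	drm_atomic_helper_commit_crtc_disable(dev, state);

	/* Bookkeeping and mode programming. */
	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);
	drm_atomic_helper_commit_crtc_set_mode(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	/* Enable phase: CRTCs first, then the bridge chains. */
	drm_atomic_helper_commit_crtc_enable(dev, state);
	drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
	drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
	drm_atomic_helper_commit_writebacks(dev, state);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
}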
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4a7f72044ab8..4b47aa0dab35 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -366,6 +366,9 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
+ if (helper->info->state != FBINFO_STATE_RUNNING)
+ return;
+
drm_fb_helper_fb_dirty(helper);
}
@@ -732,6 +735,13 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
+ /*
+ * Cancel pending damage work. During GPU reset, VBlank
+ * interrupts are disabled and drm_fb_helper_fb_dirty()
+ * would otherwise wait for the VBlank timeout.
+ */
+ cancel_work_sync(&fb_helper->damage_work);
+
console_lock();
} else {
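
The two hunks above cooperate: the suspend path cancels any queued damage work, and the worker re-checks the fbdev state so a damage item that races the cancel becomes a no-op. The general cancel-plus-recheck shape, under assumed names (example_ctx and friends are not DRM symbols):

struct example_ctx {
	struct work_struct work;
	bool suspended;
};

static void example_worker(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	/* Re-check state: a queue racing the cancel exits immediately. */
	if (READ_ONCE(ctx->suspended))
		return;

	/* ... perform the deferred flush ... */
}

static void example_suspend(struct example_ctx *ctx)
{
	WRITE_ONCE(ctx->suspended, true);
	cancel_work_sync(&ctx->work);	/* no worker runs past this point */
}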
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 93b9cff89080..f13eb5f36e8a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -96,7 +96,8 @@ err_release:
/**
* drm_gem_shmem_init - Initialize an allocated object.
* @dev: DRM device
- * @obj: The allocated shmem GEM object.
+ * @shmem: The allocated shmem GEM object.
+ * @size: Buffer size in bytes
*
* Returns:
* 0 on success, or a negative error code on failure.
@@ -895,4 +896,4 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 8a06d296561d..0de47e83d84d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1602,14 +1602,48 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+/*
+ * drm_gpuvm_bo_destroy_not_in_lists() - final part of drm_gpuvm_bo cleanup
+ * @vm_bo: the &drm_gpuvm_bo to destroy
+ *
+ * It is illegal to call this function while the @vm_bo is present in the
+ * GEM's gpuva list, the extobj list, or the evicted list.
+ *
+ * Note that this puts a reference on the GEM object, which may destroy the
+ * GEM object when the refcount reaches zero. It is illegal for that to
+ * happen while the caller holds the GEM's gpuva mutex, because destroying
+ * the GEM object would also free the mutex.
+ */
+static void
+drm_gpuvm_bo_destroy_not_in_lists(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gem_object *obj = vm_bo->obj;
+
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+}
+
+static void
+drm_gpuvm_bo_destroy_not_in_lists_kref(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
+}
+
static void
drm_gpuvm_bo_destroy(struct kref *kref)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
struct drm_gpuvm *gpuvm = vm_bo->vm;
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
- struct drm_gem_object *obj = vm_bo->obj;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
if (!lock)
@@ -1618,16 +1652,10 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(gpuvm, obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, vm_bo->obj);
list_del(&vm_bo->list.entry.gem);
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
/**
@@ -1745,9 +1773,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
void
drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
{
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gpuvm_bo *vm_bo;
- struct drm_gem_object *obj;
struct llist_node *bo_defer;
bo_defer = llist_del_all(&gpuvm->bo_defer);
@@ -1766,14 +1792,7 @@ drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
while (bo_defer) {
vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
bo_defer = bo_defer->next;
- obj = vm_bo->obj;
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
@@ -1861,6 +1880,9 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
* count is decreased. If not found @__vm_bo is returned without further
* increase of the reference count.
*
+ * The provided @__vm_bo must not be in the gpuva, evict, or extobj lists
+ * when this function is called.
+ *
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
*
* Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
@@ -1873,14 +1895,19 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
struct drm_gem_object *obj = __vm_bo->obj;
struct drm_gpuvm_bo *vm_bo;
+ drm_WARN_ON(gpuvm->drm, !drm_gpuvm_immediate_mode(gpuvm));
+
+ mutex_lock(&obj->gpuva.lock);
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
if (vm_bo) {
- drm_gpuvm_bo_put(__vm_bo);
+ mutex_unlock(&obj->gpuva.lock);
+ kref_put(&__vm_bo->kref, drm_gpuvm_bo_destroy_not_in_lists_kref);
return vm_bo;
}
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
+ mutex_unlock(&obj->gpuva.lock);
return __vm_bo;
}
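Since all three free paths now funnel through drm_gpuvm_bo_destroy_not_in_lists(), a driver embedding the vm_bo only needs a vm_bo_free hook to release its own allocation. A sketch with hypothetical my_* names:

struct my_vm_bo {
	struct drm_gpuvm_bo base;
	/* driver-private bookkeeping */
};

static void my_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
{
	struct my_vm_bo *mine = container_of(vm_bo, struct my_vm_bo, base);

	/* The vm_bo is already off the gpuva/extobj/evict lists here. */
	kfree(mine);
}

static const struct drm_gpuvm_ops my_gpuvm_ops = {
	.vm_bo_free = my_vm_bo_free,
};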
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 37d7cfbbb3e8..704f2f945019 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -3,6 +3,7 @@
* Copyright © 2024-2025 Intel Corporation
*/
+#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
@@ -196,7 +197,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
- zone_device_page_init(page, 0);
+ zone_device_page_init(page, page_pgmap(page), 0);
}
/**
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages,
+ devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
+ devmem_allocation->pre_migrate_fence = NULL;
+
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
* @ops: Pointer to the operations structure for GPU SVM device memory
* @dpagemap: The struct drm_pagemap we're allocating from.
* @size: Size of device memory allocation
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;
@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
+ devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
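A caller-side sketch of the extended init, with hypothetical my_*/mdev names. The fence reference passed in is consumed by the successful migrate path (see the dma_fence_put() above), so hand over a dedicated reference, or NULL for the old behaviour:

static int my_init_devmem(struct my_device *mdev, struct mm_struct *mm,
			  size_t size, struct dma_fence *clear_fence)
{
	struct drm_pagemap_devmem *devmem;

	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return -ENOMEM;

	/* clear_fence may be NULL; pass an owned reference otherwise. */
	drm_pagemap_devmem_init(devmem, mdev->dev, mm, &my_devmem_ops,
				&mdev->dpagemap, size,
				clear_fence ? dma_fence_get(clear_fence) : NULL);
	return 0;
}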
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 01813e11e6c6..8e76ac8ee4e2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1692,7 +1692,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
- mod_delayed_work(system_wq, &hdata->hotplug_work,
+ mod_delayed_work(system_percpu_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 76d77a736d84..4b77be94348d 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -457,27 +457,20 @@ int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
- const struct drm_format_info *format = fb->format;
+ const struct drm_format_info *format;
struct drm_connector *connector;
unsigned int i, num_properties;
struct gud_state_req *req;
int idx, ret;
size_t len;
- if (drm_WARN_ON_ONCE(plane->dev, !fb))
- return -EINVAL;
-
- if (drm_WARN_ON_ONCE(plane->dev, !crtc))
- return -EINVAL;
-
- crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
- mode = &crtc_state->mode;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@@ -492,6 +485,9 @@ int gud_plane_atomic_check(struct drm_plane *plane,
if (old_plane_state->rotation != new_plane_state->rotation)
crtc_state->mode_changed = true;
+ mode = &crtc_state->mode;
+ format = fb->format;
+
if (old_fb && old_fb->format != format)
crtc_state->mode_changed = true;
@@ -598,7 +594,7 @@ void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
int ret, idx;
- if (crtc->state->mode_changed || !crtc->state->enable) {
+ if (!crtc || crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
mutex_lock(&gdrm->damage_lock);
if (gdrm->fb) {
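The gud fix follows the general rule that a plane's atomic_check must tolerate new_plane_state->crtc == NULL (the plane being disabled); drm_atomic_helper_check_plane_state() accepts a NULL crtc_state for exactly that case. A generic sketch, hypothetical my_* name:

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state = NULL;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, false);
	if (ret)
		return ret;

	/* Disabled or fully clipped: nothing hardware-specific to check. */
	if (!new_state->visible)
		return 0;

	/* ... hardware-specific checks go here ... */
	return 0;
}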
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
index 942d9b9c93ce..04af552b3648 100644
--- a/drivers/gpu/drm/i915/display/intel_color_pipeline.c
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
@@ -34,11 +34,19 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en
return ret;
list->type = colorop->base.base.id;
- list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
/* TODO: handle failures and clean up */
prev_op = &colorop->base;
+ colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+ ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+ prev_op = &colorop->base;
+
if (DISPLAY_VER(display) >= 35 &&
intel_color_crtc_has_3dlut(display, pipe) &&
plane->type == DRM_PLANE_TYPE_PRIMARY) {
@@ -55,15 +63,6 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en
prev_op = &colorop->base;
}
- colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
- ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
- DRM_COLOROP_FLAG_ALLOW_BYPASS);
- if (ret)
- return ret;
-
- drm_colorop_set_next_property(prev_op, &colorop->base);
- prev_op = &colorop->base;
-
colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
PLANE_GAMMA_SIZE,
@@ -74,6 +73,8 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en
drm_colorop_set_next_property(prev_op, &colorop->base);
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", list->type);
+
return 0;
}
@@ -81,9 +82,10 @@ int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
{
struct drm_device *dev = plane->dev;
struct intel_display *display = to_intel_display(dev);
- struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {};
int len = 0;
- int ret;
+ int ret = 0;
+ int i;
/* Currently expose pipeline only for HDR planes */
if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
@@ -92,8 +94,14 @@ int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
/* Add pipeline consisting of transfer functions */
ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
if (ret)
- return ret;
+ goto out;
len++;
- return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ ret = drm_plane_create_color_pipeline_property(plane, pipelines, len);
+
+ for (i = 0; i < len; i++)
+ kfree(pipelines[i].name);
+
+out:
+ return ret;
}
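The leak being fixed here comes from the kasprintf()'d enum names: enum names are copied when the property is created, so the caller owns its strings and must free them whether or not creation succeeds. A condensed sketch of that ownership, hypothetical my_* name:

static int my_create_pipeline_prop(struct drm_plane *plane, int pipeline_id)
{
	struct drm_prop_enum_list entry = {};
	int ret;

	entry.type = pipeline_id;
	entry.name = kasprintf(GFP_KERNEL, "Color Pipeline %d", pipeline_id);
	if (!entry.name)
		return -ENOMEM;

	ret = drm_plane_create_color_pipeline_property(plane, &entry, 1);
	kfree(entry.name);	/* the core copied the string */

	return ret;
}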
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b057c2fa03a4..d49e96f9be51 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ return err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
- goto err;
+ return err;
}
err = eb_add_vma(eb, &current_batch, i, vma);
@@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
- if (err) {
- if (i + 1 < eb->buffer_count) {
- /*
- * Execbuffer code expects last vma entry to be NULL,
- * since we already initialized this entry,
- * set the next value to NULL or we mess up
- * cleanup handling.
- */
- eb->vma[i + 1].vma = NULL;
- }
-
+ if (err)
return err;
- }
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
eb->args->flags |= __EXEC_USERPTR_USED;
@@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
return 0;
-
-err:
- eb->vma[i].vma = NULL;
- return err;
}
static int eb_lock_vmas(struct i915_execbuffer *eb)
@@ -3375,7 +3360,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec;
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
+ memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
+
eb.batch_pool = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
@@ -3584,7 +3570,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate extra slots for use by the command parser */
+	/*
+	 * Allocate extra slots for use by the command parser.
+	 *
+	 * Note that this allocation backs two different arrays (the
+	 * exec2_list array and the eventual eb.vma array introduced in
+	 * i915_gem_do_execbuffer()) that reside in virtually contiguous
+	 * memory. Also note that the allocation intentionally isn't
+	 * zeroed: the exec2_list part doesn't need to be, as it's
+	 * immediately overwritten by user data a few lines below, while
+	 * the eb.vma part is explicitly zeroed later in
+	 * i915_gem_do_execbuffer().
+	 */
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
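A generic sketch of the two-arrays-in-one-allocation pattern the comment describes (illustrative, not i915 code): the per-slot size covers both element types, the second array starts right after the first, and only the part that is not immediately overwritten gets zeroed.

struct entry { u64 handle; };
struct shadow { void *vma; unsigned int flags; };

static struct entry *alloc_entries_with_shadow(unsigned int count,
					       struct shadow **out_shadow)
{
	struct entry *entries;

	/* One allocation sized for both arrays, as with eb_element_size(). */
	entries = kvmalloc_array(count + 2,
				 sizeof(struct entry) + sizeof(struct shadow),
				 __GFP_NOWARN | GFP_KERNEL);
	if (!entries)
		return NULL;

	/* The shadow array aliases the tail, after count + 1 entries. */
	*out_shadow = (struct shadow *)(entries + count + 1);

	/* Entries are overwritten by the caller; zero only the shadow part. */
	memset(*out_shadow, 0, (count + 1) * sizeof(struct shadow));
	return entries;
}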
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 7582ef34bf3f..303d8d9b7775 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -686,7 +686,7 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
}
/* This list includes registers that are useful in debugging GuC hangs. */
-const struct {
+static const struct {
u32 start;
u32 count;
} guc_hw_reg_state[] = {
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 8a56952f6730..99d681413eff 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -137,6 +137,7 @@ update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
struct rogue_fwif_kccb_cmd cmd;
int idx;
int err;
+ int slot;
if (group_mask)
fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
@@ -154,8 +155,13 @@ update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
cmd.kccb_flags = 0;
- err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
+ if (err)
+ goto err_drm_dev_exit;
+
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+err_drm_dev_exit:
drm_dev_exit(idx);
err_up_read:
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index a66cf082af24..c07c9a915190 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj)
drm_gem_shmem_object_free(obj);
}
+static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
+{
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
+
+ if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
.free = pvr_gem_object_free,
.print_info = drm_gem_shmem_object_print_info,
+ .export = pvr_gem_export,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 96188bf9274a..ad8c8b823681 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -8,7 +8,7 @@ config DRM_MEDIATEK
depends on OF
depends on MTK_MMSYS
select DRM_CLIENT_SELECTION
- select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 61cab32e213a..53360b5d12ba 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -836,20 +836,6 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
- int ret;
-
- dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
- if (IS_ERR(dpi->next_bridge)) {
- ret = PTR_ERR(dpi->next_bridge);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- /* Old devicetree has only one endpoint */
- dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
- if (IS_ERR(dpi->next_bridge))
- return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
- "Failed to get bridge\n");
- }
return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
@@ -1319,6 +1305,15 @@ static int mtk_dpi_probe(struct platform_device *pdev)
if (dpi->irq < 0)
return dpi->irq;
+ dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
+ if (IS_ERR(dpi->next_bridge) && PTR_ERR(dpi->next_bridge) == -ENODEV) {
+ /* Old devicetree has only one endpoint */
+ dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
+ }
+ if (IS_ERR(dpi->next_bridge))
+ return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
+ "Failed to get bridge\n");
+
platform_set_drvdata(pdev, dpi);
dpi->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0e2bcd5f67b7..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,12 +1002,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
- /*
- * set flag to request the DSI host bridge be pre-enabled before device bridge
- * in the chain, so the DSI host is ready when the device bridge is pre-enabled
- */
- dsi->next_bridge->pre_enable_prev_first = true;
-
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index 024cc7e9036c..7525a9f9907a 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
*/
#include <linux/dma-buf.h>
@@ -18,24 +20,64 @@
static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-static const struct vm_operations_struct vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
+static void mtk_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+ struct mtk_drm_private *priv = obj->dev->dev_private;
+
+ if (dma_obj->sgt)
+ drm_prime_gem_destroy(obj, dma_obj->sgt);
+ else
+ dma_free_wc(priv->dma_dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->dma_addr);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(dma_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+ struct mtk_drm_private *priv = obj->dev->dev_private;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr,
+ dma_obj->dma_addr, obj->size);
+ if (ret) {
+ DRM_ERROR("failed to allocate sgt, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
.free = mtk_gem_free_object,
+ .print_info = drm_gem_dma_object_print_info,
.get_sg_table = mtk_gem_prime_get_sg_table,
- .vmap = mtk_gem_prime_vmap,
- .vunmap = mtk_gem_prime_vunmap,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = mtk_gem_object_mmap,
- .vm_ops = &vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
-static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
- unsigned long size)
+static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev,
+ unsigned long size, bool private)
{
- struct mtk_gem_obj *mtk_gem_obj;
+ struct drm_gem_dma_object *dma_obj;
int ret;
size = round_up(size, PAGE_SIZE);
@@ -43,86 +85,65 @@ static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);
- mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
- if (!mtk_gem_obj)
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
+ dma_obj->base.funcs = &mtk_gem_object_funcs;
- ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
- if (ret < 0) {
+ if (private) {
+ ret = 0;
+ drm_gem_private_object_init(dev, &dma_obj->base, size);
+ } else {
+ ret = drm_gem_object_init(dev, &dma_obj->base, size);
+ }
+ if (ret) {
DRM_ERROR("failed to initialize gem object\n");
- kfree(mtk_gem_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
- return mtk_gem_obj;
+ return dma_obj;
}
-struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
- size_t size, bool alloc_kmap)
+static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size)
{
struct mtk_drm_private *priv = dev->dev_private;
- struct mtk_gem_obj *mtk_gem;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj;
int ret;
- mtk_gem = mtk_gem_init(dev, size);
- if (IS_ERR(mtk_gem))
- return ERR_CAST(mtk_gem);
-
- obj = &mtk_gem->base;
+ dma_obj = mtk_gem_init(dev, size, false);
+ if (IS_ERR(dma_obj))
+ return ERR_CAST(dma_obj);
- mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
+ obj = &dma_obj->base;
- if (!alloc_kmap)
- mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-
- mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
- &mtk_gem->dma_addr, GFP_KERNEL,
- mtk_gem->dma_attrs);
- if (!mtk_gem->cookie) {
+ dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size,
+ &dma_obj->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!dma_obj->vaddr) {
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
ret = -ENOMEM;
goto err_gem_free;
}
- if (alloc_kmap)
- mtk_gem->kvaddr = mtk_gem->cookie;
-
- DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
- mtk_gem->cookie, &mtk_gem->dma_addr,
+ DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n",
+ dma_obj->vaddr, &dma_obj->dma_addr,
size);
- return mtk_gem;
+ return dma_obj;
err_gem_free:
drm_gem_object_release(obj);
- kfree(mtk_gem);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
-void mtk_gem_free_object(struct drm_gem_object *obj)
-{
- struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
- struct mtk_drm_private *priv = obj->dev->dev_private;
-
- if (mtk_gem->sg)
- drm_prime_gem_destroy(obj, mtk_gem->sg);
- else
- dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
- mtk_gem->dma_addr, mtk_gem->dma_attrs);
-
- /* release file pointer to gem object. */
- drm_gem_object_release(obj);
-
- kfree(mtk_gem);
-}
-
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct mtk_gem_obj *mtk_gem;
+ struct drm_gem_dma_object *dma_obj;
int ret;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
@@ -135,25 +156,25 @@ int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
args->size = args->pitch;
args->size *= args->height;
- mtk_gem = mtk_gem_create(dev, args->size, false);
- if (IS_ERR(mtk_gem))
- return PTR_ERR(mtk_gem);
+ dma_obj = mtk_gem_create(dev, args->size);
+ if (IS_ERR(dma_obj))
+ return PTR_ERR(dma_obj);
/*
* allocate a id of idr table where the obj is registered
* and handle has the id what user can see.
*/
- ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
+ ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle);
if (ret)
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put(&mtk_gem->base);
+ drm_gem_object_put(&dma_obj->base);
return 0;
err_handle_create:
- mtk_gem_free_object(&mtk_gem->base);
+ mtk_gem_free_object(&dma_obj->base);
return ret;
}
@@ -161,129 +182,50 @@ static int mtk_gem_object_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
- int ret;
- struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
+ int ret;
/*
* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
* whole buffer from the start.
*/
- vma->vm_pgoff = 0;
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
/*
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+ vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
+
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
- ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
- mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
+ ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr,
+ dma_obj->dma_addr, obj->size);
+ if (ret)
+ drm_gem_vm_close(vma);
return ret;
}
-/*
- * Allocate a sg_table for this GEM object.
- * Note: Both the table's contents, and the sg_table itself must be freed by
- * the caller.
- * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
- */
-struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
- struct mtk_drm_private *priv = obj->dev->dev_private;
- struct sg_table *sgt;
- int ret;
-
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- return ERR_PTR(-ENOMEM);
-
- ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
- mtk_gem->dma_addr, obj->size,
- mtk_gem->dma_attrs);
- if (ret) {
- DRM_ERROR("failed to allocate sgt, %d\n", ret);
- kfree(sgt);
- return ERR_PTR(ret);
- }
-
- return sgt;
-}
-
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach, struct sg_table *sg)
+ struct dma_buf_attachment *attach, struct sg_table *sgt)
{
- struct mtk_gem_obj *mtk_gem;
+ struct drm_gem_dma_object *dma_obj;
/* check if the entries in the sg_table are contiguous */
- if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
+ if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
DRM_ERROR("sg_table is not contiguous");
return ERR_PTR(-EINVAL);
}
- mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
- if (IS_ERR(mtk_gem))
- return ERR_CAST(mtk_gem);
-
- mtk_gem->dma_addr = sg_dma_address(sg->sgl);
- mtk_gem->sg = sg;
-
- return &mtk_gem->base;
-}
-
-int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
-{
- struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
- struct sg_table *sgt = NULL;
- unsigned int npages;
-
- if (mtk_gem->kvaddr)
- goto out;
-
- sgt = mtk_gem_prime_get_sg_table(obj);
- if (IS_ERR(sgt))
- return PTR_ERR(sgt);
-
- npages = obj->size >> PAGE_SHIFT;
- mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
- if (!mtk_gem->pages) {
- sg_free_table(sgt);
- kfree(sgt);
- return -ENOMEM;
- }
-
- drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
-
- mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
- if (!mtk_gem->kvaddr) {
- sg_free_table(sgt);
- kfree(sgt);
- kfree(mtk_gem->pages);
- return -ENOMEM;
- }
- sg_free_table(sgt);
- kfree(sgt);
-
-out:
- iosys_map_set_vaddr(map, mtk_gem->kvaddr);
-
- return 0;
-}
-
-void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
-{
- struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
- void *vaddr = map->vaddr;
+ dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true);
+ if (IS_ERR(dma_obj))
+ return ERR_CAST(dma_obj);
- if (!mtk_gem->pages)
- return;
+ dma_obj->dma_addr = sg_dma_address(sgt->sgl);
+ dma_obj->sgt = sgt;
- vunmap(vaddr);
- mtk_gem->kvaddr = NULL;
- kfree(mtk_gem->pages);
+ return &dma_obj->base;
}
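The converted paths are exercised from userspace through the standard dumb-buffer ioctls; an illustrative libdrm sketch (hypothetical helper name) of the route into mtk_gem_dumb_create() and mtk_gem_object_mmap():

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
			     uint32_t *out_handle, uint64_t *out_size)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = {};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return MAP_FAILED;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	*out_handle = create.handle;
	*out_size = create.size;
	/* The fake offset feeds vm_pgoff, now handled via the DMA helpers. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}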
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h
index 66e5f154f698..afebc3a970a8 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_gem.h
@@ -7,42 +7,11 @@
#define _MTK_GEM_H_
#include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
-/*
- * mtk drm buffer structure.
- *
- * @base: a gem object.
- * - a new handle to this gem object would be created
- * by drm_gem_handle_create().
- * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs()
- * @kvaddr: kernel virtual address of gem buffer.
- * @dma_addr: dma address of gem buffer.
- * @dma_attrs: dma attributes of gem buffer.
- *
- * P.S. this object would be transferred to user as kms_bo.handle so
- * user can access the buffer through kms_bo.handle.
- */
-struct mtk_gem_obj {
- struct drm_gem_object base;
- void *cookie;
- void *kvaddr;
- dma_addr_t dma_addr;
- unsigned long dma_attrs;
- struct sg_table *sg;
- struct page **pages;
-};
-
-#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
-
-void mtk_gem_free_object(struct drm_gem_object *gem);
-struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
- bool alloc_kmap);
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
-int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.c b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c
index e78eb0876f16..bd7f8c56ec9c 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_common.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c
@@ -303,7 +303,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, struct platform_device
return dev_err_probe(dev, ret, "Failed to get clocks\n");
hdmi->irq = platform_get_irq(pdev, 0);
- if (!hdmi->irq)
+ if (hdmi->irq < 0)
return hdmi->irq;
hdmi->regs = device_node_to_regmap(dev->of_node);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.h b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h
index de5e064585f8..7a644bbf5843 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_common.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h
@@ -168,7 +168,7 @@ struct mtk_hdmi {
bool audio_enable;
bool powered;
bool enabled;
- unsigned int irq;
+ int irq;
enum hdmi_hpd_state hpd;
hdmi_codec_plugged_cb plugged_cb;
struct device *codec_dev;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
index b844e2c10f28..d937219fdb7e 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
@@ -66,11 +66,19 @@ static int mtk_ddc_check_and_rise_low_bus(struct mtk_hdmi_ddc *ddc)
return 0;
}
-static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
- u16 offset_id, u8 *wr_data)
+static int mtk_ddcm_write_hdmi(struct mtk_hdmi_ddc *ddc, u16 addr_id,
+ u16 offset_id, u16 data_cnt, u8 *wr_data)
{
u32 val;
- int ret;
+ int ret, i;
+
+	/* Don't allow transfers larger than the transfer FIFO size
+	 * (16 bytes)
+	 */
+ if (data_cnt > 16) {
+ dev_err(ddc->dev, "Invalid DDCM write request\n");
+ return -EINVAL;
+ }
/* If down, rise bus for write operation */
mtk_ddc_check_and_rise_low_bus(ddc);
@@ -78,16 +86,21 @@ static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT,
FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT));
+ /* In case there is no payload data, just do a single write for the
+ * address only
+ */
if (wr_data) {
- regmap_write(ddc->regs, SI2C_CTRL,
- FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
- FIELD_PREP(SI2C_WDATA, *wr_data) |
- SI2C_WR);
+ /* Fill transfer fifo with payload data */
+ for (i = 0; i < data_cnt; i++) {
+ regmap_write(ddc->regs, SI2C_CTRL,
+ FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
+ FIELD_PREP(SI2C_WDATA, wr_data[i]) |
+ SI2C_WR);
+ }
}
-
regmap_write(ddc->regs, DDC_CTRL,
FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) |
- FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : 1) |
+ FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : data_cnt) |
FIELD_PREP(DDC_CTRL_OFFSET, offset_id) |
FIELD_PREP(DDC_CTRL_ADDR, addr_id));
usleep_range(1000, 1250);
@@ -96,6 +109,11 @@ static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
!(val & DDC_I2C_IN_PROG), 500, 1000);
if (ret) {
dev_err(ddc->dev, "DDC I2C write timeout\n");
+
+ /* Abort transfer if it is still in progress */
+ regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER));
+
return ret;
}
@@ -179,6 +197,11 @@ static int mtk_ddcm_read_hdmi(struct mtk_hdmi_ddc *ddc, u16 uc_dev,
500 * (temp_length + 5));
if (ret) {
dev_err(ddc->dev, "Timeout waiting for DDC I2C\n");
+
+ /* Abort transfer if it is still in progress */
+ regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER));
+
return ret;
}
@@ -250,24 +273,9 @@ static int mtk_hdmi_fg_ddc_data_read(struct mtk_hdmi_ddc *ddc, u16 b_dev,
static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev,
u8 data_addr, u16 data_cnt, u8 *pr_data)
{
- int i, ret;
-
regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN);
- /*
- * In case there is no payload data, just do a single write for the
- * address only
- */
- if (data_cnt == 0)
- return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL);
- i = 0;
- do {
- ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i);
- if (ret)
- return ret;
- } while (++i < data_cnt);
-
- return 0;
+ return mtk_ddcm_write_hdmi(ddc, b_dev, data_addr, data_cnt, pr_data);
}
static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
index c272e1e74b7d..454b8b93b834 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
@@ -1120,9 +1120,10 @@ static void mtk_hdmi_v2_hpd_disable(struct drm_bridge *bridge)
mtk_hdmi_v2_disable(hdmi);
}
-static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- unsigned long long tmds_rate)
+static enum drm_mode_status
+mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
if (mode->clock < MTK_HDMI_V2_CLOCK_MIN)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 5043e0377270..fcd10d7e8342 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -11,13 +11,13 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <linux/align.h>
#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_gem.h"
#include "mtk_plane.h"
static const u64 modifiers[] = {
@@ -114,8 +114,8 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
struct mtk_plane_state *mtk_plane_state)
{
struct drm_framebuffer *fb = new_state->fb;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem;
- struct mtk_gem_obj *mtk_gem;
unsigned int pitch, format;
u64 modifier;
dma_addr_t addr;
@@ -124,8 +124,8 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
int offset;
gem = fb->obj[0];
- mtk_gem = to_mtk_gem_obj(gem);
- addr = mtk_gem->dma_addr;
+ dma_obj = to_drm_gem_dma_obj(gem);
+ addr = dma_obj->dma_addr;
pitch = fb->pitches[0];
format = fb->format->format;
modifier = fb->modifier;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 93f8f4f64578..b43c4f9bbcdf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(statec))
return (void *)statec;
+
+ return nv50_head_atom(statec);
+}
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!statec)
+ return NULL;
+
return nv50_head_atom(statec);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index a95ee5dcc2e3..1a889139cb05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -84,6 +84,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
+ nv50_atom(asyh->state.state)->lock_core = true;
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 3dd742b4f823..e32ed1db6c56 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -43,6 +43,9 @@ nv50_head_flush_clr(struct nv50_head *head,
union nv50_head_atom_mask clr = {
.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
};
+
+ lockdep_assert_held(&head->disp->mutex);
+
if (clr.crc) nv50_crc_atomic_clr(head);
if (clr.olut) head->func->olut_clr(head);
if (clr.core) head->func->core_clr(head);
@@ -65,6 +68,8 @@ nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
+ lockdep_assert_held(&head->disp->mutex);
+
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ef9e410babbf..9a2c20fce0f3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
- asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index d1beaad0c82b..834ed6587aa5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -1,28 +1,81 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_CONN_H__
#define __NVBIOS_CONN_H__
+
+/*
+ * An enumeration of all possible VBIOS connector types defined
+ * by Nvidia at
+ * https://nvidia.github.io/open-gpu-doc/DCB/DCB-4.x-Specification.html.
+ *
+ * [1] Nvidia's documentation actually claims DCB_CONNECTOR_HDMI_0 is a "3-Pin
+ * DIN Stereo Connector". This seems very likely to be a documentation typo
+ * or some sort of funny historical baggage, because we've treated this
+ * connector type as HDMI for years without issue.
+ * TODO: Check with Nvidia what's actually happening here.
+ */
enum dcb_connector_type {
- DCB_CONNECTOR_VGA = 0x00,
- DCB_CONNECTOR_TV_0 = 0x10,
- DCB_CONNECTOR_TV_1 = 0x11,
- DCB_CONNECTOR_TV_3 = 0x13,
- DCB_CONNECTOR_DVI_I = 0x30,
- DCB_CONNECTOR_DVI_D = 0x31,
- DCB_CONNECTOR_DMS59_0 = 0x38,
- DCB_CONNECTOR_DMS59_1 = 0x39,
- DCB_CONNECTOR_LVDS = 0x40,
- DCB_CONNECTOR_LVDS_SPWG = 0x41,
- DCB_CONNECTOR_DP = 0x46,
- DCB_CONNECTOR_eDP = 0x47,
- DCB_CONNECTOR_mDP = 0x48,
- DCB_CONNECTOR_HDMI_0 = 0x60,
- DCB_CONNECTOR_HDMI_1 = 0x61,
- DCB_CONNECTOR_HDMI_C = 0x63,
- DCB_CONNECTOR_DMS59_DP0 = 0x64,
- DCB_CONNECTOR_DMS59_DP1 = 0x65,
- DCB_CONNECTOR_WFD = 0x70,
- DCB_CONNECTOR_USB_C = 0x71,
- DCB_CONNECTOR_NONE = 0xff
+ /* Analog outputs */
+ DCB_CONNECTOR_VGA = 0x00, // VGA 15-pin connector
+ DCB_CONNECTOR_DVI_A = 0x01, // DVI-A
+ DCB_CONNECTOR_POD_VGA = 0x02, // Pod - VGA 15-pin connector
+ DCB_CONNECTOR_TV_0 = 0x10, // TV - Composite Out
+ DCB_CONNECTOR_TV_1 = 0x11, // TV - S-Video Out
+ DCB_CONNECTOR_TV_2 = 0x12, // TV - S-Video Breakout - Composite
+ DCB_CONNECTOR_TV_3 = 0x13, // HDTV Component - YPrPb
+ DCB_CONNECTOR_TV_SCART = 0x14, // TV - SCART Connector
+ DCB_CONNECTOR_TV_SCART_D = 0x16, // TV - Composite SCART over D-connector
+ DCB_CONNECTOR_TV_DTERM = 0x17, // HDTV - D-connector (EIAJ4120)
+ DCB_CONNECTOR_POD_TV_3 = 0x18, // Pod - HDTV - YPrPb
+ DCB_CONNECTOR_POD_TV_1 = 0x19, // Pod - S-Video
+ DCB_CONNECTOR_POD_TV_0 = 0x1a, // Pod - Composite
+
+ /* DVI digital outputs */
+ DCB_CONNECTOR_DVI_I_TV_1 = 0x20, // DVI-I-TV-S-Video
+ DCB_CONNECTOR_DVI_I_TV_0 = 0x21, // DVI-I-TV-Composite
+ DCB_CONNECTOR_DVI_I_TV_2 = 0x22, // DVI-I-TV-S-Video Breakout-Composite
+ DCB_CONNECTOR_DVI_I = 0x30, // DVI-I
+ DCB_CONNECTOR_DVI_D = 0x31, // DVI-D
+ DCB_CONNECTOR_DVI_ADC = 0x32, // Apple Display Connector (ADC)
+ DCB_CONNECTOR_DMS59_0 = 0x38, // LFH-DVI-I-1
+ DCB_CONNECTOR_DMS59_1 = 0x39, // LFH-DVI-I-2
+ DCB_CONNECTOR_BNC = 0x3c, // BNC Connector [for SDI?]
+
+ /* LVDS / TMDS digital outputs */
+ DCB_CONNECTOR_LVDS = 0x40, // LVDS-SPWG-Attached [is this name correct?]
+ DCB_CONNECTOR_LVDS_SPWG = 0x41, // LVDS-OEM-Attached (non-removable)
+ DCB_CONNECTOR_LVDS_REM = 0x42, // LVDS-SPWG-Detached [following naming above]
+ DCB_CONNECTOR_LVDS_SPWG_REM = 0x43, // LVDS-OEM-Detached (removable)
+ DCB_CONNECTOR_TMDS = 0x45, // TMDS-OEM-Attached (non-removable)
+
+ /* DP digital outputs */
+ DCB_CONNECTOR_DP = 0x46, // DisplayPort External Connector
+ DCB_CONNECTOR_eDP = 0x47, // DisplayPort Internal Connector
+ DCB_CONNECTOR_mDP = 0x48, // DisplayPort (Mini) External Connector
+
+ /* Dock outputs (not used) */
+ DCB_CONNECTOR_DOCK_VGA_0 = 0x50, // VGA 15-pin if not docked
+ DCB_CONNECTOR_DOCK_VGA_1 = 0x51, // VGA 15-pin if docked
+ DCB_CONNECTOR_DOCK_DVI_I_0 = 0x52, // DVI-I if not docked
+ DCB_CONNECTOR_DOCK_DVI_I_1 = 0x53, // DVI-I if docked
+ DCB_CONNECTOR_DOCK_DVI_D_0 = 0x54, // DVI-D if not docked
+ DCB_CONNECTOR_DOCK_DVI_D_1 = 0x55, // DVI-D if docked
+ DCB_CONNECTOR_DOCK_DP_0 = 0x56, // DisplayPort if not docked
+ DCB_CONNECTOR_DOCK_DP_1 = 0x57, // DisplayPort if docked
+ DCB_CONNECTOR_DOCK_mDP_0 = 0x58, // DisplayPort (Mini) if not docked
+ DCB_CONNECTOR_DOCK_mDP_1 = 0x59, // DisplayPort (Mini) if docked
+
+ /* HDMI? digital outputs */
+ DCB_CONNECTOR_HDMI_0 = 0x60, // HDMI? See [1] in top-level enum comment above
+ DCB_CONNECTOR_HDMI_1 = 0x61, // HDMI-A connector
+ DCB_CONNECTOR_SPDIF = 0x62, // Audio S/PDIF connector
+ DCB_CONNECTOR_HDMI_C = 0x63, // HDMI-C (Mini) connector
+
+ /* Misc. digital outputs */
+ DCB_CONNECTOR_DMS59_DP0 = 0x64, // LFH-DP-1
+ DCB_CONNECTOR_DMS59_DP1 = 0x65, // LFH-DP-2
+ DCB_CONNECTOR_WFD = 0x70, // Virtual connector for Wifi Display (WFD)
+ DCB_CONNECTOR_USB_C = 0x71, // [DP over USB-C; not present in docs]
+ DCB_CONNECTOR_NONE = 0xff // Skip Entry
};
struct nvbios_connT {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 00515623a2cc..829c2b573971 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -352,6 +352,8 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
+ .atomic_commit = drm_atomic_helper_commit,
+ .atomic_check = drm_atomic_helper_check,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 58071652679d..3d8031296eed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -425,7 +425,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
order = ilog2(DMEM_CHUNK_NPAGES);
}
- zone_device_folio_init(folio, order);
+ zone_device_folio_init(folio, page_pgmap(folio_page(folio, 0)), order);
return page;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 2dab6612c4fc..23d1e5c27bb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -191,27 +191,60 @@ nvkm_uconn_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nv
spin_lock(&disp->client.lock);
if (!conn->object.func) {
switch (conn->info.type) {
- case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break;
- case DCB_CONNECTOR_TV_0 :
- case DCB_CONNECTOR_TV_1 :
- case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break;
- case DCB_CONNECTOR_DMS59_0 :
- case DCB_CONNECTOR_DMS59_1 :
- case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break;
- case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break;
- case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break;
- case DCB_CONNECTOR_LVDS_SPWG: args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break;
- case DCB_CONNECTOR_DMS59_DP0:
- case DCB_CONNECTOR_DMS59_DP1:
- case DCB_CONNECTOR_DP :
- case DCB_CONNECTOR_mDP :
- case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break;
- case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break;
- case DCB_CONNECTOR_HDMI_0 :
- case DCB_CONNECTOR_HDMI_1 :
- case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break;
+ /* VGA */
+ case DCB_CONNECTOR_DVI_A :
+ case DCB_CONNECTOR_POD_VGA :
+ case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break;
+
+ /* TV */
+ case DCB_CONNECTOR_TV_0 :
+ case DCB_CONNECTOR_TV_1 :
+ case DCB_CONNECTOR_TV_2 :
+ case DCB_CONNECTOR_TV_SCART :
+ case DCB_CONNECTOR_TV_SCART_D :
+ case DCB_CONNECTOR_TV_DTERM :
+ case DCB_CONNECTOR_POD_TV_3 :
+ case DCB_CONNECTOR_POD_TV_1 :
+ case DCB_CONNECTOR_POD_TV_0 :
+ case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break;
+
+ /* DVI */
+ case DCB_CONNECTOR_DVI_I_TV_1 :
+ case DCB_CONNECTOR_DVI_I_TV_0 :
+ case DCB_CONNECTOR_DVI_I_TV_2 :
+ case DCB_CONNECTOR_DVI_ADC :
+ case DCB_CONNECTOR_DMS59_0 :
+ case DCB_CONNECTOR_DMS59_1 :
+ case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break;
+ case DCB_CONNECTOR_TMDS :
+ case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break;
+
+ /* LVDS */
+ case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break;
+ case DCB_CONNECTOR_LVDS_SPWG : args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break;
+
+ /* DP */
+ case DCB_CONNECTOR_DMS59_DP0 :
+ case DCB_CONNECTOR_DMS59_DP1 :
+ case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
+ case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break;
+ case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break;
+
+ /* HDMI */
+ case DCB_CONNECTOR_HDMI_0 :
+ case DCB_CONNECTOR_HDMI_1 :
+ case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break;
+
+ /*
+ * Dock & unused outputs.
+ * BNC, SPDIF, WFD, and detached LVDS go here.
+ */
default:
- WARN_ON(1);
+ nvkm_warn(&disp->engine.subdev,
+ "unimplemented connector type 0x%02x\n",
+ conn->info.type);
+ args->v0.type = NVIF_CONN_V0_VGA;
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index 35d1fcef520b..c456a9626823 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -30,6 +30,9 @@ ad102_gsp = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 503760246660..851140e80122 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -337,18 +337,12 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
-nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
}
-void
-nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
-{
- nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
-}
-
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index d201e8697226..27a13aeccd3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -47,6 +47,9 @@ ga100_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 917f7e2f6c46..b6b3eb6f4c00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -158,6 +158,9 @@ ga102_gsp_r535 = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 86bdd203bc10..9dd66a2e3801 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -7,9 +7,8 @@ enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
-int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
-void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp);
struct nvkm_gsp_fwif {
int version;
@@ -52,6 +51,11 @@ struct nvkm_gsp_func {
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
+ struct {
+ int (*ctor)(struct nvkm_gsp *);
+ void (*dtor)(struct nvkm_gsp *);
+ } fwsec_sb;
+
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
@@ -67,6 +71,8 @@ extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
+void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_init(struct nvkm_gsp *);
int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
@@ -91,5 +97,18 @@ int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
+static inline int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.ctor)
+ return gsp->func->fwsec_sb.ctor(gsp);
+ return 0;
+}
+
+static inline void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.dtor)
+ gsp->func->fwsec_sb.dtor(gsp);
+}
+
extern const struct nvkm_gsp_func gv100_gsp;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 81e56da0474a..04b642a1f730 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -30,6 +30,18 @@
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+int
+tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ return nvkm_gsp_fwsec_sb_init(gsp);
+}
+
+void
+tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
+}
+
static int
tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -370,6 +382,9 @@ tu102_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 97eb046c25d0..58cf25842421 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -30,6 +30,9 @@ tu116_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b26b682826bc..162cc58c7b8f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -623,49 +623,6 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
if (IS_ERR(desc))
return ERR_CAST(desc);
- panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
- &panel_simple_funcs, desc->connector_type);
- if (IS_ERR(panel))
- return ERR_CAST(panel);
-
- panel->desc = desc;
-
- panel->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(panel->supply))
- return ERR_CAST(panel->supply);
-
- panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio))
- return dev_err_cast_probe(dev, panel->enable_gpio,
- "failed to request GPIO\n");
-
- err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
- if (err) {
- dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
- return ERR_PTR(err);
- }
-
- ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
- if (ddc) {
- panel->ddc = of_find_i2c_adapter_by_node(ddc);
- of_node_put(ddc);
-
- if (!panel->ddc)
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
- !of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
-
- if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* Optional data-mapping property for overriding bus format */
- err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
- if (err)
- goto free_ddc;
- }
-
connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
@@ -690,8 +647,7 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
- err = -EINVAL;
- goto free_ddc;
+ return ERR_PTR(-EINVAL);
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -720,6 +676,49 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
}
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, connector_type);
+ if (IS_ERR(panel))
+ return ERR_CAST(panel);
+
+ panel->desc = desc;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return ERR_CAST(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_cast_probe(dev, panel->enable_gpio,
+ "failed to request GPIO\n");
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return ERR_PTR(err);
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
+ !of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+
+ if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* Optional data-mapping property for overriding bus format */
+ err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
+ if (err)
+ goto free_ddc;
+ }
+
dev_set_drvdata(dev, panel);
/*
@@ -1900,6 +1899,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
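The probe reorder moves the connector-type sanity checks ahead of all resource acquisition, so an invalid descriptor can bail out with a plain ERR_PTR and the eDP case no longer needs the free_ddc unwind; devm_drm_panel_alloc() then runs with the already-validated connector_type. A minimal sketch of the validate-before-allocate shape, with example_alloc() as an illustrative stand-in rather than the driver's helper:

	static struct drm_panel *example_alloc(struct device *dev,
					       const struct panel_desc *desc); /* stand-in */

	/* Sketch: reject bad descriptors before anything is allocated, so
	 * the early error paths need no cleanup labels.
	 */
	static struct drm_panel *example_probe(struct device *dev,
					       const struct panel_desc *desc)
	{
		if (desc->connector_type == DRM_MODE_CONNECTOR_eDP)
			return ERR_PTR(-EINVAL); /* nothing acquired yet */

		return example_alloc(dev, desc); /* devm-managed allocation */
	}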
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index d4839d282689..f6339963e496 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1252,17 +1252,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
- /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
- * pre-allocated BO if the <BO,VM> association exists. Given we
- * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
- * be called immediately, and we have to hold the VM resv lock when
- * calling this function.
- */
- dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->base.base.gpuva.lock);
- dma_resv_unlock(panthor_vm_resv(vm));
op_ctx->map.bo_offset = offset;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 56ff6a3fb483..d7dc83cf7b00 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -295,7 +295,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
- return ret;
+ goto dev_put;
}
ret = pl111_modeset_init(drm);
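The one-line fix matters because probe has already taken a reference on the drm_device; returning directly on IRQ failure leaked it, while the existing label drops it. The pattern, sketched with hypothetical names (example_driver and example_setup_irq() are illustrative):

	static struct drm_driver example_driver;	      /* stand-in */
	static int example_setup_irq(struct drm_device *drm); /* stand-in */

	static int example_probe(struct amba_device *adev)
	{
		struct drm_device *drm;
		int ret;

		drm = drm_dev_alloc(&example_driver, &adev->dev);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		ret = example_setup_irq(drm);
		if (ret)
			goto dev_put; /* was "return ret", leaking the reference */

		return 0;

	dev_put:
		drm_dev_put(drm); /* balances drm_dev_alloc() */
		return ret;
	}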
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 969a8fb0ee9e..f4e71046dc91 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[] __counted_by(ucNumEntries);
+ UCHAR clockInfo[] /*__counted_by(ucNumEntries)*/;
}ClockInfoArray;
typedef struct _NonClockInfoArray{
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index c9fe6aa3e3e3..8604342f9943 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -121,7 +121,7 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
struct drm_crtc *crtc = encoder->crtc;
/* Unconditionally switch to TMDS as FRL is not yet supported */
- gpiod_set_value(hdmi->frl_enable_gpio, 0);
+ gpiod_set_value_cansleep(hdmi->frl_enable_gpio, 0);
if (!crtc || !crtc->state)
return;
@@ -640,6 +640,15 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
}
+static int __maybe_unused dw_hdmi_qp_rockchip_suspend(struct device *dev)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_qp_suspend(dev, hdmi->hdmi);
+
+ return 0;
+}
+
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
@@ -655,7 +664,8 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
}
static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_qp_rockchip_suspend,
+ dw_hdmi_qp_rockchip_resume)
};
struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index cd8380f0eddc..f3950e8476a7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2104,7 +2104,7 @@ static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
* Spin until the previous port_mux configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
- port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ port_mux_sel == vop2->old_port_sel, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
port_mux_sel, vop2->old_port_sel);
@@ -2124,7 +2124,7 @@ static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
* Spin until the previous layer configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
- atv_layer_cfg == cfg, 0, 50 * 1000);
+ atv_layer_cfg == cfg, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
atv_layer_cfg, cfg);
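Both polls move from a 0 µs delay, i.e. a pure busy-spin on the register, to re-reading every 10 µs while keeping the 50 ms timeout. For reference, readx_poll_timeout_atomic() from <linux/iopoll.h> takes (op, addr, val, cond, delay_us, timeout_us); a minimal sketch with a hypothetical read-back:

	#include <linux/iopoll.h>

	struct example_dev;
	u32 example_read_status(struct example_dev *edev); /* stand-in read-back */

	/* Poll the status read-back every 10 us, giving up after 50 ms. */
	static int example_wait_done(struct example_dev *edev, u32 expected)
	{
		u32 val;

		return readx_poll_timeout_atomic(example_read_status, edev, val,
						 val == expected, 10, 50 * 1000);
	}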
@@ -2144,6 +2144,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
u8 layer_sel_id;
unsigned int ofs;
u32 ovl_ctrl;
+ u32 cfg_done;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
@@ -2298,8 +2299,16 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
rk3568_vop2_wait_for_port_mux_done(vop2);
}
- if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
- rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) {
+ cfg_done = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+ cfg_done &= (BIT(vop2->data->nr_vps) - 1);
+ cfg_done &= ~BIT(vp->id);
+ /*
+ * Overlay changes for other VPs have not yet taken effect
+ */
+ if (cfg_done)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ }
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
mutex_unlock(&vop2->ovl_lock);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index da670d7eeb2e..de96bfe7562c 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -55,15 +55,6 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
#endif
/*
- * Input parsing
- */
-
-int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-
-/*
* Display modes
*/
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 86eb5d97410b..8bb93194e5ac 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -26,9 +26,33 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
- drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+ /*
+ * TI's OLDI and DSI encoders need to be set up before the crtc is
+ * enabled. Thus drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() cannot be used here, as
+ * they enable the crtc before bridges' pre-enable, and disable the crtc
+ * after bridges' post-disable.
+ *
+ * Open code the functions here and first call the bridges' pre-enables,
+ * then the crtc enable, then the bridges' enable (and the reverse
+ * order for disable).
+ */
+
+ drm_atomic_helper_commit_encoder_bridge_disable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_disable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(ddev, old_state);
+
+ drm_atomic_helper_update_legacy_modeset_state(ddev, old_state);
+ drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_commit_crtc_set_mode(ddev, old_state);
+
+ drm_atomic_helper_commit_planes(ddev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_enable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_enable(ddev, old_state);
+ drm_atomic_helper_commit_writebacks(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);
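Put together, the open-coded tail performs the same steps as the stock drm_atomic_helper_commit_tail(), but with the crtc enable sandwiched between the bridges' pre-enable and enable phases, mirrored on the disable side. A condensed sketch of the resulting ordering, using the split helpers from the hunk above with runtime-PM and flip handling elided:

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		/* disable: bridges -> crtc -> bridges' post-disable */
		drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
		drm_atomic_helper_commit_crtc_disable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);

		drm_atomic_helper_update_legacy_modeset_state(dev, state);
		drm_atomic_helper_calc_timestamping_constants(state);
		drm_atomic_helper_commit_crtc_set_mode(dev, state);

		drm_atomic_helper_commit_planes(dev, state,
						DRM_PLANE_COMMIT_ACTIVE_ONLY);

		/* enable: bridges' pre-enable -> crtc -> bridges' enable */
		drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
		drm_atomic_helper_commit_crtc_enable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
		drm_atomic_helper_commit_writebacks(dev, state);
	}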
diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c
index 5c3ffc78aea0..d03a1f2e9c41 100644
--- a/drivers/gpu/drm/vkms/vkms_colorop.c
+++ b/drivers/gpu/drm/vkms/vkms_colorop.c
@@ -37,7 +37,6 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr
goto cleanup;
list->type = ops[i]->base.id;
- list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
i++;
@@ -88,6 +87,8 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr
drm_colorop_set_next_property(ops[i - 1], ops[i]);
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id);
+
return 0;
cleanup:
@@ -103,18 +104,18 @@ cleanup:
int vkms_initialize_colorops(struct drm_plane *plane)
{
- struct drm_prop_enum_list pipeline;
- int ret;
+ struct drm_prop_enum_list pipeline = {};
+ int ret = 0;
/* Add color pipeline */
ret = vkms_initialize_color_pipeline(plane, &pipeline);
if (ret)
- return ret;
+ goto out;
/* Create COLOR_PIPELINE property and attach */
ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1);
- if (ret)
- return ret;
- return 0;
+ kfree(pipeline.name);
+out:
+ return ret;
}
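The fix hinges on ownership of the kasprintf() buffer: the list entry is named after ops[0] only once the whole pipeline is built, and the string is freed after the property is created, on the assumption that drm_plane_create_color_pipeline_property() copies the name rather than keeping the pointer. The ownership pattern in isolation, with build_list()/create_prop() as hypothetical stand-ins:

	static int build_list(struct drm_plane *plane,
			      struct drm_prop_enum_list *list);  /* stand-in */
	static int create_prop(struct drm_plane *plane,
			       struct drm_prop_enum_list *list); /* stand-in */

	static int example_init(struct drm_plane *plane)
	{
		struct drm_prop_enum_list list = {};
		int ret;

		ret = build_list(plane, &list);	 /* kasprintf()s list.name */
		if (ret)
			goto out;

		ret = create_prop(plane, &list); /* assumed to copy the name */
		kfree(list.name); /* caller owns the buffer on both paths */
	out:
		return ret;
	}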
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f031a312c783..b22887e8c881 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -32,9 +32,15 @@
#include <drm/ttm/ttm_placement.h>
-static void vmw_bo_release(struct vmw_bo *vbo)
+/**
+ * vmw_bo_free - vmw_bo destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_resource *res;
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
@@ -62,20 +68,8 @@ static void vmw_bo_release(struct vmw_bo *vbo)
}
vmw_surface_unreference(&vbo->dumb_surface);
}
- drm_gem_object_release(&vbo->tbo.base);
-}
-
-/**
- * vmw_bo_free - vmw_bo destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_bo *vbo = to_vmw_bo(&bo->base);
-
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_release(vbo);
+ drm_gem_object_release(&vbo->tbo.base);
WARN_ON(vbo->dirty);
kfree(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 00be92da5509..85795082fef9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,12 +515,12 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
/**
* vmw_event_fence_action_seq_passed
*
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
+ * @f: The struct dma_fence which provides the timestamp for the action event.
+ * @cb: The struct dma_fence_cb callback for the action event.
*
- * This function is called when the seqno of the fence where @action is
- * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context.
+ * This function is called when the seqno of the fence has passed
+ * and it is always called from atomic context.
+ * It queues the event on the submitter's event list.
*/
static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
struct dma_fence_cb *cb)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d32ce1cb579e..bc51b5d55e38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -766,13 +766,15 @@ err_out:
return ERR_PTR(ret);
}
- ttm_bo_reserve(&bo->tbo, false, false, NULL);
- ret = vmw_bo_dirty_add(bo);
- if (!ret && surface && surface->res.func->dirty_alloc) {
- surface->res.coherent = true;
- ret = surface->res.func->dirty_alloc(&surface->res);
+ if (bo) {
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
}
- ttm_bo_unreserve(&bo->tbo);
return &vfb->base;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 69dfe69ce0f8..a8c8c9375d29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -923,8 +923,10 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
- if (unlikely(ret != 0))
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
goto no_reserve;
+ }
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
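The bug was checking the stale integer ret after a call that reports failure through an ERR_PTR-encoded pointer, so allocation errors were silently ignored. The canonical idiom from <linux/err.h>, with illustrative stand-in names:

	#include <linux/err.h>

	struct example_dev;
	struct example_res;
	struct example_res *example_alloc(struct example_dev *edev); /* stand-in */
	void example_consume(struct example_res *res);		      /* stand-in */

	static int example_use(struct example_dev *edev)
	{
		struct example_res *res;

		res = example_alloc(edev);
		if (IS_ERR(res))
			return PTR_ERR(res); /* decode the errno from the pointer */

		example_consume(res);
		return 0;
	}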
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 4b288eb3f5b0..4d7dcaff2b91 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -39,7 +39,7 @@ config DRM_XE
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
- select DRM_GPUSVM if !UML && DEVICE_PRIVATE
+ select DRM_GPUSVM if !UML
select DRM_GPUVM
select DRM_SCHED
select MMU_NOTIFIER
@@ -80,8 +80,9 @@ config DRM_XE_GPUSVM
bool "Enable CPU to GPU address mirroring"
depends on DRM_XE
depends on !UML
- depends on DEVICE_PRIVATE
+ depends on ZONE_DEVICE
default y
+ select DEVICE_PRIVATE
select DRM_GPUSVM
help
Enable this option if you want support for CPU to GPU address
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bf4ee976b680..71acd45aa33b 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1055,6 +1055,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
unsigned long *scanned)
{
struct xe_device *xe = ttm_to_xe_device(bo->bdev);
+ struct ttm_tt *tt = bo->ttm;
long lret;
/* Fake move to system, without copying data. */
@@ -1079,8 +1080,10 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
.writeback = false,
.allow_move = false});
- if (lret > 0)
+ if (lret > 0) {
xe_ttm_tt_account_subtract(xe, bo->ttm);
+ update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+ }
return lret;
}
@@ -1166,8 +1169,10 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
if (needs_rpm)
xe_pm_runtime_put(xe);
- if (lret > 0)
+ if (lret > 0) {
xe_ttm_tt_account_subtract(xe, tt);
+ update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+ }
out_unref:
xe_bo_put(xe_bo);
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index e91da9589c5f..63fd8bf13c70 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -256,14 +256,64 @@ static ssize_t wedged_mode_show(struct file *f, char __user *ubuf,
return simple_read_from_buffer(ubuf, size, pos, buf, len);
}
+static int __wedged_mode_set_reset_policy(struct xe_gt *gt, enum xe_wedged_mode mode)
+{
+ bool enable_engine_reset;
+ int ret;
+
+ enable_engine_reset = (mode != XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET);
+ ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads,
+ enable_engine_reset);
+ if (ret)
+ xe_gt_err(gt, "Failed to update GuC ADS scheduler policy (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
+
+static int wedged_mode_set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode)
+{
+ struct xe_gt *gt;
+ int ret;
+ u8 id;
+
+ guard(xe_pm_runtime)(xe);
+ for_each_gt(gt, xe, id) {
+ ret = __wedged_mode_set_reset_policy(gt, mode);
+ if (ret) {
+ if (id > 0) {
+ xe->wedged.inconsistent_reset = true;
+ drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n");
+ }
+ return ret;
+ }
+ }
+
+ xe->wedged.inconsistent_reset = false;
+
+ return 0;
+}
+
+static bool wedged_mode_needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode)
+{
+ if (xe->wedged.inconsistent_reset)
+ return true;
+
+ if (xe->wedged.mode == mode)
+ return false;
+
+ if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET ||
+ mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)
+ return true;
+
+ return false;
+}
+
static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
size_t size, loff_t *pos)
{
struct xe_device *xe = file_inode(f)->i_private;
- struct xe_gt *gt;
u32 wedged_mode;
ssize_t ret;
- u8 id;
ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode);
if (ret)
@@ -272,22 +322,14 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
if (wedged_mode > 2)
return -EINVAL;
- if (xe->wedged.mode == wedged_mode)
- return size;
+ if (wedged_mode_needs_policy_update(xe, wedged_mode)) {
+ ret = wedged_mode_set_reset_policy(xe, wedged_mode);
+ if (ret)
+ return ret;
+ }
xe->wedged.mode = wedged_mode;
- xe_pm_runtime_get(xe);
- for_each_gt(gt, xe, id) {
- ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
- if (ret) {
- xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
- xe_pm_runtime_put(xe);
- return -EIO;
- }
- }
- xe_pm_runtime_put(xe);
-
return size;
}
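Besides splitting the policy update out, the rework replaces the manual xe_pm_runtime_get()/put() pair with a scope-based guard, so every return path drops the runtime-PM reference automatically. A sketch of how such a guard is typically declared with the <linux/cleanup.h> helpers; the actual xe_pm.h definition may differ:

	#include <linux/cleanup.h>

	/* Assumed declaration: acquire on entry, release when the scope ends. */
	DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
		     xe_pm_runtime_get(_T), xe_pm_runtime_put(_T));

	static bool example_failed(struct xe_device *xe); /* stand-in */

	static int example(struct xe_device *xe)
	{
		guard(xe_pm_runtime)(xe); /* put() runs on every return below */

		if (example_failed(xe))
			return -EIO; /* reference still dropped here */

		return 0;
	}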
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 0b2fa7c56d38..047e86e22133 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -44,6 +44,22 @@ struct xe_pat_ops;
struct xe_pxp;
struct xe_vram_region;
+/**
+ * enum xe_wedged_mode - possible wedged modes
+ * @XE_WEDGED_MODE_NEVER: Device will never be declared wedged.
+ * @XE_WEDGED_MODE_UPON_CRITICAL_ERROR: Device will be declared wedged only
+ * when a critical error occurs, such as a GT reset failure or a firmware failure.
+ * This is the default mode.
+ * @XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET: Device will be declared wedged on
+ * any hang. In this mode, engine resets are disabled to avoid automatic
+ * recovery attempts. This mode is primarily intended for debugging hangs.
+ */
+enum xe_wedged_mode {
+ XE_WEDGED_MODE_NEVER = 0,
+ XE_WEDGED_MODE_UPON_CRITICAL_ERROR = 1,
+ XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET = 2,
+};
+
#define XE_BO_INVALID_OFFSET LONG_MAX
#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
@@ -587,6 +603,8 @@ struct xe_device {
int mode;
/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
unsigned long method;
+ /** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */
+ bool inconsistent_reset;
} wedged;
/** @bo_device: Struct to control async free of BOs */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8724f8de67e2..779d7e7e2d2e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -328,6 +328,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
* @xe: Xe device.
* @tile: tile which bind exec queue belongs to.
* @flags: exec queue creation flags
+ * @user_vm: The user VM which this exec queue belongs to
* @extensions: exec queue creation extensions
*
* Normalize bind exec queue creation. Bind exec queue is tied to migration VM
@@ -341,6 +342,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
*/
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
struct xe_tile *tile,
+ struct xe_vm *user_vm,
u32 flags, u64 extensions)
{
struct xe_gt *gt = tile->primary_gt;
@@ -377,6 +379,9 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
xe_exec_queue_put(q);
return ERR_PTR(err);
}
+
+ if (user_vm)
+ q->user_vm = xe_vm_get(user_vm);
}
return q;
@@ -407,6 +412,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
+ if (q->user_vm) {
+ xe_vm_put(q->user_vm);
+ q->user_vm = NULL;
+ }
+
q->ops->destroy(q);
}
@@ -742,6 +752,22 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
return -EINVAL;
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -ENOENT;
+
+ err = down_read_interruptible(&vm->lock);
+ if (err) {
+ xe_vm_put(vm);
+ return err;
+ }
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ up_read(&vm->lock);
+ xe_vm_put(vm);
+ return -ENOENT;
+ }
+
for_each_tile(tile, xe, id) {
struct xe_exec_queue *new;
@@ -749,9 +775,11 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (id)
flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
- new = xe_exec_queue_create_bind(xe, tile, flags,
+ new = xe_exec_queue_create_bind(xe, tile, vm, flags,
args->extensions);
if (IS_ERR(new)) {
+ up_read(&vm->lock);
+ xe_vm_put(vm);
err = PTR_ERR(new);
if (q)
goto put_exec_queue;
@@ -763,6 +791,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
list_add_tail(&new->multi_gt_list,
&q->multi_gt_link);
}
+ up_read(&vm->lock);
+ xe_vm_put(vm);
} else {
logical_mask = calc_validate_logical_mask(xe, eci,
args->width,
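Taken together, the xe_exec_queue changes establish a simple lifetime rule: a bind queue takes a reference on the user VM it was created for and drops it in the destructor, which later lets the bind ioctl reject a queue used against a different VM (see the xe_vm.c hunk further down). Consolidated into one sketch:

	/* creation: a bind queue keeps its user VM alive */
	if (user_vm)
		q->user_vm = xe_vm_get(user_vm);

	/* destruction: drop the reference exactly once */
	if (q->user_vm) {
		xe_vm_put(q->user_vm);
		q->user_vm = NULL;
	}

	/* bind ioctl: the queue must belong to the VM being bound */
	if (q && vm != q->user_vm)
		return -EINVAL;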
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index fda4d4f9bda8..37a9da22f420 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -28,6 +28,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
struct xe_tile *tile,
+ struct xe_vm *user_vm,
u32 flags, u64 extensions);
void xe_exec_queue_fini(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 771ffe35cd0c..3a4263c92b3d 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -54,6 +54,12 @@ struct xe_exec_queue {
struct kref refcount;
/** @vm: VM (address space) for this exec queue */
struct xe_vm *vm;
+ /**
+ * @user_vm: User VM (address space) for this exec queue (bind queues
+ * only)
+ */
+ struct xe_vm *user_vm;
+
/** @class: class of this exec queue */
enum xe_engine_class class;
/**
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index ef481b334af4..793d7324a395 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -322,7 +322,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
else
ggtt->pt_ops = &xelp_pt_ops;
- ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ ggtt->wq = alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0);
if (!ggtt->wq)
return -ENOMEM;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 420b0e6089de..e8897a77ba19 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -41,10 +41,10 @@ struct xe_gt_sriov_vf_runtime {
};
/**
- * xe_gt_sriov_vf_migration - VF migration data.
+ * struct xe_gt_sriov_vf_migration - VF migration data.
*/
struct xe_gt_sriov_vf_migration {
- /** @migration: VF migration recovery worker */
+ /** @worker: VF migration recovery worker */
struct work_struct worker;
/** @lock: Protects recovery_queued, teardown */
spinlock_t lock;
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index bcb85a1bf26d..3f7f1b5602d5 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -983,16 +983,17 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
/**
* xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
* @ads: Additional data structures object
+ * @enable_engine_reset: true to enable engine resets, false otherwise
*
- * This function update the GuC's engine reset policy based on wedged.mode.
+ * This function updates the GuC's engine reset policy.
*
* Return: 0 on success, and negative error code otherwise.
*/
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+ bool enable_engine_reset)
{
struct guc_policies *policies;
struct xe_guc *guc = ads_to_guc(ads);
- struct xe_device *xe = ads_to_xe(ads);
CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
if (!xe_guc_buf_is_valid(buf))
@@ -1004,10 +1005,11 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
policies->is_valid = 1;
- if (xe->wedged.mode == 2)
- policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
- else
+
+ if (enable_engine_reset)
policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+ else
+ policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.h b/drivers/gpu/drm/xe/xe_guc_ads.h
index 2e6674c760ff..7a39f361cb17 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads.h
@@ -6,6 +6,8 @@
#ifndef _XE_GUC_ADS_H_
#define _XE_GUC_ADS_H_
+#include <linux/types.h>
+
struct xe_guc_ads;
int xe_guc_ads_init(struct xe_guc_ads *ads);
@@ -13,6 +15,7 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads);
void xe_guc_ads_populate(struct xe_guc_ads *ads);
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads);
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+ bool enable_engine_reset);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 4ac434ad216f..a5019d1e741b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
g2h_fence->cancel = true;
g2h_fence->fail = true;
- g2h_fence->done = true;
+
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
}
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
@@ -1203,10 +1205,13 @@ retry_same_fence:
return ret;
}
- ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+ /* READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
+ * and g2h_fence_cancel.
+ */
+ ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
- if (g2h_fence.done) {
+ if (READ_ONCE(g2h_fence.done)) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- g2h_fence->done = true;
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
smp_mb();
wake_up_all(&ct->g2h_fence_wq);
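The annotations mark the cross-thread accesses to done for KCSAN; ordering is still provided by the smp_mb()/wake_up_all() on the writer side and by the wait-queue machinery on the reader side. The pattern reduced to its essentials:

	/* writer (G2H handler or cancel path) */
	WRITE_ONCE(fence->done, true);
	smp_mb(); /* publish the flag before waking the waiter */
	wake_up_all(&wq);

	/* reader (process context), re-checks the flag on each wakeup */
	ret = wait_event_timeout(wq, READ_ONCE(fence->done), HZ);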
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
index 0f5da89ce98b..2a8a985c37e7 100644
--- a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
@@ -15,10 +15,12 @@
#define XE_LB_MAX_PAYLOAD_SIZE SZ_4K
/**
- * xe_late_bind_fw_id - enum to determine late binding fw index
+ * enum xe_late_bind_fw_id - enum to determine late binding fw index
*/
enum xe_late_bind_fw_id {
+ /** @XE_LB_FW_FAN_CONTROL: Fan control */
XE_LB_FW_FAN_CONTROL = 0,
+ /** @XE_LB_FW_MAX_ID: Number of IDs */
XE_LB_FW_MAX_ID
};
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index b5083c99dd50..281286f2b5f9 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1050,6 +1050,9 @@ static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
{
u32 *cmd = batch;
+ if (IS_SRIOV_VF(gt_to_xe(lrc->gt)))
+ return 0;
+
if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
return -ENOSPC;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2184af413b91..d8ee76aab4e4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long sram_offset,
struct drm_pagemap_addr *sram_addr,
u64 vram_addr,
+ struct dma_fence *deps,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+ if (deps && !dma_fence_is_signaled(deps)) {
+ dma_fence_get(deps);
+ err = drm_sched_job_add_dependency(&job->drm, deps);
+ if (err)
+ dma_fence_wait(deps, false);
+ err = 0;
+ }
+
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
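The dependency hand-off degrades gracefully: on success, the extra reference is owned by the scheduler, which runs the job only after deps signals; if attaching fails (an allocation error), the code falls back to waiting synchronously and clears err, since the job is already built at this point. The shape of the fallback:

	if (deps && !dma_fence_is_signaled(deps)) {
		dma_fence_get(deps); /* ref consumed by the scheduler */
		err = drm_sched_job_add_dependency(&job->drm, deps);
		if (err)
			dma_fence_wait(deps, false); /* fall back: block now */
		err = 0; /* dependency satisfied either way */
	}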
@@ -2175,6 +2184,8 @@ err:
* @npages: Number of pages to migrate.
* @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from an array dma addresses to a VRAM device physical address
*
@@ -2184,10 +2195,11 @@ err:
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr)
+ u64 dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
- XE_MIGRATE_COPY_TO_VRAM);
+ deps, XE_MIGRATE_COPY_TO_VRAM);
}
/**
@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
* @dst_addr: Array of DMA information (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from a VRAM device physical address to an array of DMA addresses
*
@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr)
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
- XE_MIGRATE_COPY_TO_SRAM);
+ deps, XE_MIGRATE_COPY_TO_SRAM);
}
static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
&pagemap_addr[current_page],
- vram_addr, write ?
+ vram_addr, NULL, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
@@ -2430,7 +2445,7 @@ void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
if (is_migrate)
mutex_lock(&m->job_mutex);
else
- xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+ xe_vm_assert_held(q->user_vm); /* A user queue's VM should be locked */
}
/**
@@ -2448,7 +2463,7 @@ void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
if (is_migrate)
mutex_unlock(&m->job_mutex);
else
- xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+ xe_vm_assert_held(q->user_vm); /* A user queue's VM should be locked */
}
#if IS_ENABLED(CONFIG_PROVE_LOCKING)
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 260e298e5dd7..b76441f062b4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr);
+ u64 dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr);
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 797a4b866226..d963231b5135 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -346,7 +346,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
flags = EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
EXEC_QUEUE_FLAG_MIGRATE;
- q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+ q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto err_ret;
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e1..f97e0af6a9b0 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
static int xe_svm_copy(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages, const enum xe_svm_copy_dir dir)
+ unsigned long npages, const enum xe_svm_copy_dir dir,
+ struct dma_fence *pre_migrate_fence)
{
struct xe_vram_region *vr = NULL;
struct xe_gt *gt = NULL;
@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
vram_addr, (u64)pagemap_addr[pos].addr, 1);
__fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
(u64)pagemap_addr[pos].addr, vram_addr, 1);
__fence = xe_migrate_to_vram(vr->migrate, 1,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -629,6 +634,8 @@ err_out:
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
+ if (pre_migrate_fence)
+ dma_fence_wait(pre_migrate_fence, false);
/*
* XXX: We can't derive the GT here (or anywhere in this function, but
@@ -645,16 +652,20 @@ err_out:
static int xe_svm_copy_to_devmem(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+ pre_migrate_fence);
}
static int xe_svm_copy_to_ram(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+ pre_migrate_fence);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct xe_device *xe = xe_bo_device(bo);
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
xe_bo_put_async(bo);
xe_pm_runtime_put(xe);
}
@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms)
{
struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;
@@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
break;
}
+ /* Ensure that any clearing or async eviction will complete before migration. */
+ if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+ err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ &pre_migrate_fence);
+ if (err)
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ else if (pre_migrate_fence)
+ dma_fence_enable_sw_signaling(pre_migrate_fence);
+ }
+
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops, dpagemap, end - start);
+ &dpagemap_devmem_ops, dpagemap, end - start,
+ pre_migrate_fence);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (xe_svm_range_in_vram(range)) {
- drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
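Before migrating, the populate path snapshots any outstanding kernel-usage fences (clears, async evictions) on the BO's reservation object and threads them into the copy as a single dependency; if the snapshot cannot be allocated it simply blocks. Both helpers are standard dma-resv API; a sketch of the pattern, assuming the caller holds the reservation lock:

	#include <linux/dma-resv.h>

	/* Snapshot pending DMA_RESV_USAGE_KERNEL fences into one dependency,
	 * falling back to a blocking wait if the snapshot allocation fails.
	 */
	static struct dma_fence *example_kernel_deps(struct dma_resv *resv)
	{
		struct dma_fence *fence = NULL;

		if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
			return NULL; /* nothing outstanding */

		if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, &fence)) {
			dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
					      false, MAX_SCHEDULE_TIMEOUT);
			return NULL;
		}

		if (fence)
			dma_fence_enable_sw_signaling(fence); /* start signaling */

		return fence;
	}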
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 79ab6c512d3e..095bb197e8b0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1617,7 +1617,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
if (!vm->pt_root[id])
continue;
- q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
+ q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto err_close;
@@ -3578,6 +3578,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
+ if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
+ err = -EINVAL;
+ goto put_exec_queue;
+ }
+
/* Ensure all UNMAPs visible */
xe_svm_flush(vm);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index ef8a5019574e..016f6786134c 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -379,7 +379,7 @@ static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *
}
/**
- * xe_vm_set_validation_exec() - Accessor to read the drm_exec object
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
* @vm: The vm we want to register a drm_exec object with.
*
* Return: The drm_exec object used to lock the vm's resv. The value
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index 20d3e6d0d796..527920f9c4d3 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -3,7 +3,7 @@ config NOVA_CORE
depends on 64BIT
depends on PCI
depends on RUST
- depends on RUST_FW_LOADER_ABSTRACTIONS
+ select RUST_FW_LOADER_ABSTRACTIONS
select AUXILIARY_BUS
default n
help
diff --git a/drivers/gpu/nova-core/gsp/cmdq.rs b/drivers/gpu/nova-core/gsp/cmdq.rs
index 6f946d14868a..3991ccc0c10f 100644
--- a/drivers/gpu/nova-core/gsp/cmdq.rs
+++ b/drivers/gpu/nova-core/gsp/cmdq.rs
@@ -588,21 +588,23 @@ impl Cmdq {
header.length(),
);
+ let payload_length = header.payload_length();
+
// Check that the driver read area is large enough for the message.
- if slice_1.len() + slice_2.len() < header.length() {
+ if slice_1.len() + slice_2.len() < payload_length {
return Err(EIO);
}
// Cut the message slices down to the actual length of the message.
- let (slice_1, slice_2) = if slice_1.len() > header.length() {
- // PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
- (slice_1.split_at(header.length()).0, &slice_2[0..0])
+ let (slice_1, slice_2) = if slice_1.len() > payload_length {
+ // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
+ (slice_1.split_at(payload_length).0, &slice_2[0..0])
} else {
(
slice_1,
// PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
- // large as `msg_header.length()`.
- slice_2.split_at(header.length() - slice_1.len()).0,
+ // large as `payload_length`.
+ slice_2.split_at(payload_length - slice_1.len()).0,
)
};
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
index abffd6beec65..caeb0d251fe5 100644
--- a/drivers/gpu/nova-core/gsp/fw.rs
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -141,8 +141,8 @@ unsafe impl AsBytes for GspFwWprMeta {}
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}
-type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
-type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
+type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
+type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
impl GspFwWprMeta {
/// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
@@ -150,8 +150,8 @@ impl GspFwWprMeta {
pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
Self(bindings::GspFwWprMeta {
// CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
- magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
- revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
+ magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
+ revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
@@ -315,19 +315,19 @@ impl From<MsgFunction> for u32 {
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
// Core operation opcodes
- CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
- CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
- CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
- CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+ CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
+ CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
// Delay opcode
- DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
+ DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
// Register operation opcodes
- RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
- RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
- RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
+ RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
+ RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
+ RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}
impl fmt::Display for SeqBufOpcode {
@@ -351,25 +351,25 @@ impl TryFrom<u32> for SeqBufOpcode {
fn try_from(value: u32) -> Result<SeqBufOpcode> {
match value {
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
Ok(SeqBufOpcode::CoreReset)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
Ok(SeqBufOpcode::CoreResume)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
Ok(SeqBufOpcode::CoreStart)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
Ok(SeqBufOpcode::CoreWaitForHalt)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
Ok(SeqBufOpcode::RegModify)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
_ => Err(EINVAL),
}
}
@@ -385,7 +385,7 @@ impl From<SeqBufOpcode> for u32 {
/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
+pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
impl RegWritePayload {
/// Returns the register address.
@@ -408,7 +408,7 @@ unsafe impl AsBytes for RegWritePayload {}
/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
+pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
impl RegModifyPayload {
/// Returns the register address.
@@ -436,7 +436,7 @@ unsafe impl AsBytes for RegModifyPayload {}
/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
+pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
impl RegPollPayload {
/// Returns the register address.
@@ -469,7 +469,7 @@ unsafe impl AsBytes for RegPollPayload {}
/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
+pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
impl DelayUsPayload {
/// Returns the delay value in microseconds.
@@ -487,7 +487,7 @@ unsafe impl AsBytes for DelayUsPayload {}
/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
+pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
impl RegStorePayload {
/// Returns the register address.
@@ -510,7 +510,7 @@ unsafe impl AsBytes for RegStorePayload {}
/// Wrapper for GSP sequencer buffer command.
#[repr(transparent)]
-pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);
+pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
impl SequencerBufferCmd {
/// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
@@ -612,7 +612,7 @@ unsafe impl AsBytes for SequencerBufferCmd {}
/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
-pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);
+pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
impl RunCpuSequencer {
/// Returns the command index.
@@ -797,13 +797,6 @@ impl bindings::rpc_message_header_v {
}
}
-// SAFETY: We can't derive the Zeroable trait for this binding because the
-// procedural macro doesn't support the syntax used by bindgen to create the
-// __IncompleteArrayField types. So instead we implement it here, which is safe
-// because these are explicitly padded structures only containing types for
-// which any bit pattern, including all zeros, is valid.
-unsafe impl Zeroable for bindings::rpc_message_header_v {}
-
/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
@@ -853,11 +846,16 @@ impl GspMsgElement {
self.inner.checkSum = checksum;
}
- /// Returns the total length of the message.
+ /// Returns the length of the message's payload.
+ pub(crate) fn payload_length(&self) -> usize {
+ // `rpc.length` includes the length of the RPC message header.
+ num::u32_as_usize(self.inner.rpc.length)
+ .saturating_sub(size_of::<bindings::rpc_message_header_v>())
+ }
+
+ /// Returns the total length of the message, message and RPC headers included.
pub(crate) fn length(&self) -> usize {
- // `rpc.length` includes the length of the GspRpcHeader but not the message header.
- size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
- + num::u32_as_usize(self.inner.rpc.length)
+ size_of::<Self>() + self.payload_length()
}
// Returns the sequence number of the message.
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
index 048234d1a9d1..e99d315ae74c 100644
--- a/drivers/gpu/nova-core/gsp/fw/r570_144.rs
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
@@ -24,8 +24,11 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
-use kernel::{
- ffi,
- prelude::Zeroable, //
-};
+use kernel::ffi;
+use pin_init::MaybeZeroable;
+
include!("r570_144/bindings.rs");
+
+// SAFETY: This type has a size of zero, so its inclusion into another type should not affect that
+// type's ability to implement `Zeroable`.
+unsafe impl<T> kernel::prelude::Zeroable for __IncompleteArrayField<T> {}
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
index 5bcfbcd1ad22..6d25fe0bffa9 100644
--- a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
@@ -320,11 +320,12 @@ pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130;
pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131;
pub type _bindgen_ty_3 = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub totalVFs: u32_,
pub firstVfOffset: u32_,
pub vfFeatureMask: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
pub FirstVFBar0Address: u64_,
pub FirstVFBar1Address: u64_,
pub FirstVFBar2Address: u64_,
@@ -340,23 +341,26 @@ pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub bClientRmAllocatedCtxBuffer: u8_,
pub bNonPowerOf2ChannelCountSupported: u8_,
pub bVfResizableBAR1Supported: u8_,
+ pub __bindgen_padding_1: [u8; 7usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
pub BoardID: u32_,
pub chipSKU: [ffi::c_char; 9usize],
pub chipSKUMod: [ffi::c_char; 5usize],
+ pub __bindgen_padding_0: [u8; 2usize],
pub skuConfigVersion: u32_,
pub project: [ffi::c_char; 5usize],
pub projectSKU: [ffi::c_char; 5usize],
pub CDP: [ffi::c_char; 6usize],
pub projectSKUMod: [ffi::c_char; 2usize],
+ pub __bindgen_padding_1: [u8; 2usize],
pub businessCycle: u32_,
}
pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize];
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub base: u64_,
pub limit: u64_,
@@ -368,13 +372,14 @@ pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
pub numFBRegions: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize],
}
#[repr(C)]
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
pub index: u32_,
pub flags: u32_,
@@ -391,14 +396,14 @@ impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
}
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct DOD_METHOD_DATA {
pub status: u32_,
pub acpiIdListLen: u32_,
pub acpiIdList: [u32_; 16usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct JT_METHOD_DATA {
pub status: u32_,
pub jtCaps: u32_,
@@ -407,14 +412,14 @@ pub struct JT_METHOD_DATA {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA_ELEMENT {
pub acpiId: u32_,
pub mode: u32_,
pub status: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA {
pub tableLen: u32_,
pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
@@ -422,13 +427,13 @@ pub struct MUX_METHOD_DATA {
pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct CAPS_METHOD_DATA {
pub status: u32_,
pub optimusCaps: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct ACPI_METHOD_DATA {
pub bValid: u8_,
pub __bindgen_padding_0: [u8; 3usize],
@@ -438,20 +443,20 @@ pub struct ACPI_METHOD_DATA {
pub capsMethodData: CAPS_METHOD_DATA,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS {
pub headIndex: u32_,
pub maxHResolution: u32_,
pub maxVResolution: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS {
pub numHeads: u32_,
pub maxNumHeads: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct BUSINFO {
pub deviceID: u16_,
pub vendorID: u16_,
@@ -461,7 +466,7 @@ pub struct BUSINFO {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_VF_INFO {
pub totalVFs: u32_,
pub firstVFOffset: u32_,
@@ -474,34 +479,37 @@ pub struct GSP_VF_INFO {
pub __bindgen_padding_0: [u8; 5usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_PCIE_CONFIG_REG {
pub linkCap: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct EcidManufacturingInfo {
pub ecidLow: u32_,
pub ecidHigh: u32_,
pub ecidExtended: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct FW_WPR_LAYOUT_OFFSET {
pub nonWprHeapOffset: u64_,
pub frtsOffset: u64_,
}
#[repr(C)]
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct GspStaticConfigInfo_t {
pub grCapsBits: [u8_; 23usize],
+ pub __bindgen_padding_0: u8,
pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS,
pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS,
+ pub __bindgen_padding_1: [u8; 4usize],
pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS,
pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS,
pub sriovMaxGfid: u32_,
pub engineCaps: [u32_; 3usize],
pub poisonFuseEnabled: u8_,
+ pub __bindgen_padding_2: [u8; 7usize],
pub fb_length: u64_,
pub fbio_mask: u64_,
pub fb_bus_width: u32_,
@@ -527,16 +535,20 @@ pub struct GspStaticConfigInfo_t {
pub bIsMigSupported: u8_,
pub RTD3GC6TotalBoardPower: u16_,
pub RTD3GC6PerstDelay: u16_,
+ pub __bindgen_padding_3: [u8; 2usize],
pub bar1PdeBase: u64_,
pub bar2PdeBase: u64_,
pub bVbiosValid: u8_,
+ pub __bindgen_padding_4: [u8; 3usize],
pub vbiosSubVendor: u32_,
pub vbiosSubDevice: u32_,
pub bPageRetirementSupported: u8_,
pub bSplitVasBetweenServerClientRm: u8_,
pub bClRootportNeedsNosnoopWAR: u8_,
+ pub __bindgen_padding_5: u8,
pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS,
pub displaylessMaxResolution: VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS,
+ pub __bindgen_padding_6: [u8; 4usize],
pub displaylessMaxPixels: u64_,
pub hInternalClient: u32_,
pub hInternalDevice: u32_,
@@ -558,7 +570,7 @@ impl Default for GspStaticConfigInfo_t {
}
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspSystemInfo {
pub gpuPhysAddr: u64_,
pub gpuPhysFbAddr: u64_,
@@ -615,7 +627,7 @@ pub struct GspSystemInfo {
pub hostPageSize: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub sharedMemPhysAddr: u64_,
pub pageTableEntryCount: u32_,
@@ -624,7 +636,7 @@ pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub statQueueOffset: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SR_INIT_ARGUMENTS {
pub oldLevel: u32_,
pub flags: u32_,
@@ -632,7 +644,7 @@ pub struct GSP_SR_INIT_ARGUMENTS {
pub __bindgen_padding_0: [u8; 3usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED {
pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS,
pub srInitArguments: GSP_SR_INIT_ARGUMENTS,
@@ -642,13 +654,13 @@ pub struct GSP_ARGUMENTS_CACHED {
pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 {
pub pa: u64_,
pub size: u64_,
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union rpc_message_rpc_union_field_v03_00 {
pub spare: u32_,
pub cpuRmGfid: u32_,
@@ -664,6 +676,7 @@ impl Default for rpc_message_rpc_union_field_v03_00 {
}
pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00;
#[repr(C)]
+#[derive(MaybeZeroable)]
pub struct rpc_message_header_v03_00 {
pub header_version: u32_,
pub signature: u32_,
@@ -686,7 +699,7 @@ impl Default for rpc_message_header_v03_00 {
}
pub type rpc_message_header_v = rpc_message_header_v03_00;
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta {
pub magic: u64_,
pub revision: u64_,
@@ -721,19 +734,19 @@ pub struct GspFwWprMeta {
pub verified: u64_,
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_1 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 {
pub sysmemAddrOfSignature: u64_,
pub sizeOfSignature: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 {
pub gspFwHeapFreeListWprOffset: u32_,
pub unused0: u32_,
@@ -749,13 +762,13 @@ impl Default for GspFwWprMeta__bindgen_ty_1 {
}
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_2 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub partitionRpcAddr: u64_,
pub partitionRpcRequestOffset: u16_,
@@ -767,7 +780,7 @@ pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub lsUcodeVersion: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 {
pub partitionRpcPadding: [u32_; 4usize],
pub sysmemAddrOfCrashReportQueue: u64_,
@@ -802,7 +815,7 @@ pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegion
pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2;
pub type LibosMemoryRegionLoc = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct LibosMemoryRegionInitArgument {
pub id8: LibosAddress,
pub pa: LibosAddress,
@@ -812,7 +825,7 @@ pub struct LibosMemoryRegionInitArgument {
pub __bindgen_padding_0: [u8; 6usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct PACKED_REGISTRY_ENTRY {
pub nameOffset: u32_,
pub type_: u8_,
@@ -821,14 +834,14 @@ pub struct PACKED_REGISTRY_ENTRY {
pub length: u32_,
}
#[repr(C)]
-#[derive(Debug, Default)]
+#[derive(Debug, Default, MaybeZeroable)]
pub struct PACKED_REGISTRY_TABLE {
pub size: u32_,
pub numEntries: u32_,
pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqTxHeader {
pub version: u32_,
pub size: u32_,
@@ -840,13 +853,13 @@ pub struct msgqTxHeader {
pub entryOff: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqRxHeader {
pub readPtr: u32_,
}
#[repr(C)]
#[repr(align(8))]
-#[derive(Zeroable)]
+#[derive(MaybeZeroable)]
pub struct GSP_MSG_QUEUE_ELEMENT {
pub authTagBuffer: [u8_; 16usize],
pub aadBuffer: [u8_; 16usize],
@@ -866,7 +879,7 @@ impl Default for GSP_MSG_QUEUE_ELEMENT {
}
}
#[repr(C)]
-#[derive(Debug, Default)]
+#[derive(Debug, Default, MaybeZeroable)]
pub struct rpc_run_cpu_sequencer_v17_00 {
pub bufferSizeDWord: u32_,
pub cmdIndex: u32_,
@@ -884,20 +897,20 @@ pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_
pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8;
pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE {
pub addr: u32_,
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY {
pub addr: u32_,
pub mask: u32_,
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub addr: u32_,
pub mask: u32_,
@@ -906,24 +919,24 @@ pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub error: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US {
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE {
pub addr: u32_,
pub index: u32_,
}
#[repr(C)]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQUENCER_BUFFER_CMD {
pub opCode: GSP_SEQ_BUF_OPCODE,
pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1,
}
#[repr(C)]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE,
pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY,