Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 64
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 491
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cmtg.c | 190
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cmtg.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cmtg_regs.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 111
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 190
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 350
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 150
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 251
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 395
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_test.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo_dev.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 118
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c | 364
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 86
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 136
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 321
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 220
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 88
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_migrate.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sa_media.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/shaders/README | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 117
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_module.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_clock_gating.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h | 2
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 15
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.c | 2
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 5
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h | 3
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 2
-rw-r--r--  drivers/gpu/drm/xe/display/xe_fb_pin.c | 48
-rw-r--r--  drivers/gpu/drm/xe/display/xe_plane_initial.c | 10
173 files changed, 3117 insertions, 1799 deletions
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index c491e3203bf1..4c350c7f5144 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -486,16 +486,16 @@ EXPORT_SYMBOL(drm_lspcon_get_mode);
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
+ * @time_out: LSPCON mode change settle timeout
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
- enum drm_lspcon_mode mode)
+ enum drm_lspcon_mode mode, int time_out)
{
u8 data = 0;
int ret;
- int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
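Note on the hunk above: the LSPCON mode-change settle timeout is no longer hard-coded to 200 ms inside the helper and must now be supplied by the caller. A minimal hypothetical caller sketch follows (the wrapper function name and the 400 ms value are illustrative assumptions, not part of this patch):

#include <linux/i2c.h>
#include <drm/display/drm_dp_dual_mode_helper.h>

/* Hypothetical caller: the settle timeout (in ms) is now passed explicitly. */
static int lspcon_switch_to_pcon(const struct drm_device *dev,
				 struct i2c_adapter *adapter)
{
	/* Some sinks may need longer than the old 200 ms default. */
	return drm_lspcon_set_mode(dev, adapter, DRM_LSPCON_MODE_PCON, 400);
}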
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3dda9f0eda82..ed05b131ed3a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -231,6 +231,7 @@ i915-y += \
display/intel_bo.o \
display/intel_bw.o \
display/intel_cdclk.o \
+ display/intel_cmtg.o \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
@@ -346,6 +347,7 @@ i915-y += \
display/intel_pps.o \
display/intel_qp_tables.o \
display/intel_sdvo.o \
+ display/intel_snps_hdmi_pll.o \
display/intel_snps_phy.o \
display/intel_tv.o \
display/intel_vdsc.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 686393dfbbf5..04005cdd0461 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -517,7 +517,7 @@ static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
* Even if not, the detection bit of the 2501 is unreliable as
* it only works for some display types.
* It is even more unreliable as the PLL must be active for
- * allowing reading from the chiop.
+ * allowing reading from the chip.
*/
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 48e657a80a16..ed171fbf8720 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -673,6 +673,11 @@ vlv_primary_disable_flip_done(struct intel_plane *plane)
spin_unlock_irq(&i915->irq_lock);
}
+static bool i9xx_plane_can_async_flip(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_X_TILED;
+}
+
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
@@ -771,16 +776,15 @@ i8xx_plane_max_stride(struct intel_plane *plane,
return 8 * 1024;
}
-static unsigned int vlv_primary_min_alignment(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane)
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ if (intel_plane_can_async_flip(plane, fb->modifier))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
@@ -794,13 +798,11 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ if (intel_plane_can_async_flip(plane, fb->modifier))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
- return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 4 * 1024;
default:
@@ -937,7 +939,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_alignment = vlv_primary_min_alignment;
+ plane->min_alignment = vlv_plane_min_alignment;
else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
plane->min_alignment = g4x_primary_min_alignment;
else if (DISPLAY_VER(dev_priv) == 4)
@@ -955,23 +957,29 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->async_flip = vlv_primary_async_flip;
- plane->enable_flip_done = vlv_primary_enable_flip_done;
- plane->disable_flip_done = vlv_primary_disable_flip_done;
- } else if (IS_BROADWELL(dev_priv)) {
- plane->need_async_flip_toggle_wa = true;
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = bdw_primary_enable_flip_done;
- plane->disable_flip_done = bdw_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ivb_primary_enable_flip_done;
- plane->disable_flip_done = ivb_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ilk_primary_enable_flip_done;
- plane->disable_flip_done = ilk_primary_disable_flip_done;
+ if (HAS_ASYNC_FLIPS(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_toggle_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ }
}
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index 0ca12d1e6839..457f4bccf106 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
enum pipe;
+struct drm_framebuffer;
struct drm_i915_private;
struct intel_crtc;
struct intel_initial_plane_config;
@@ -19,6 +20,9 @@ struct intel_plane_state;
unsigned int i965_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index db78c1e6b0a3..497850a6ac81 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -446,7 +446,7 @@ static const struct intel_watermark_params i845_wm_info = {
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the short term drain rate
@@ -493,7 +493,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the long term drain rate
@@ -1562,7 +1562,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
/*
* When enabling sprite0 after sprite1 has already been enabled
* we tend to get an underrun unless sprite0 already has some
- * FIFO space allcoated. Hence we always allocate at least one
+ * FIFO space allocated. Hence we always allocate at least one
* cacheline for sprite0 whenever sprite1 is enabled.
*
* All other plane enable sequences appear immune to this problem.
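The intel_wm_method1()/intel_wm_method2() kernel-doc touched in the hunks above describes the "small buffer" and "large buffer" watermark formulas. A standalone sketch of the method-1 arithmetic only (the wm_method1_sketch name is mine and this is a simplified illustration, not the driver's exact helper; latency is in 0.1 us units and pixel_rate in kHz, hence the /10000 scale factor):

/*
 * Method 1 / "small buffer": bytes drained from the FIFO while memory
 * wakes up, i.e. pixel_rate [kHz] * cpp [bytes/pixel] * latency [0.1 us],
 * scaled back to bytes. As the kernel-doc notes, callers may add extra
 * cachelines on top for TLB misses and clock crossings.
 */
static unsigned int wm_method1_sketch(unsigned int pixel_rate,
				      unsigned int cpp,
				      unsigned int latency)
{
	unsigned long long bytes = (unsigned long long)pixel_rate * cpp * latency;

	return bytes / 10000; /* kHz * 0.1us = 1e-4 pixels per unit product */
}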
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c977b74f82f0..3bedaf1454b1 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -243,7 +243,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
- * table in BSPEC under DDI buffer programing
+ * table in BSPEC under DDI buffer programming.
*/
mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
@@ -961,7 +961,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/*
- * FIXME: Programing this by assuming progressive mode, since
+ * FIXME: Programming this by assuming progressive mode, since
* non-interlaced info from VBT is not saved inside
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 612e9b0ec14a..651f81ed85ab 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -156,6 +156,11 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(i915)->cursor_needs_physical;
}
+bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+{
+ return plane->can_async_flip && plane->can_async_flip(modifier);
+}
+
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate)
@@ -767,7 +772,7 @@ void intel_plane_update_noarm(struct intel_dsb *dsb,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_plane_update_noarm(plane, crtc);
+ trace_intel_plane_update_noarm(plane_state, crtc);
if (plane->update_noarm)
plane->update_noarm(dsb, plane, crtc_state, plane_state);
@@ -797,7 +802,7 @@ void intel_plane_update_arm(struct intel_dsb *dsb,
return;
}
- trace_intel_plane_update_arm(plane, crtc);
+ trace_intel_plane_update_arm(plane_state, crtc);
plane->update_arm(dsb, plane, crtc_state, plane_state);
}
@@ -1130,7 +1135,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (!obj)
return 0;
- ret = intel_plane_pin_fb(new_plane_state);
+ ret = intel_plane_pin_fb(new_plane_state, old_plane_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 0f982f452ff3..fb87b3353ab0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -19,6 +19,7 @@ struct intel_plane;
struct intel_plane_state;
enum plane_id;
+bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ce8a4319a63c..113d763e6ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -188,15 +188,15 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
* WA_14020863754: Implement Audio Workaround
* Corner case with Min Hblank Fix can cause audio hang
*/
-static bool needs_wa_14020863754(struct drm_i915_private *i915)
+static bool needs_wa_14020863754(struct intel_display *display)
{
- return (DISPLAY_VER(i915) == 20 || IS_BATTLEMAGE(i915));
+ return DISPLAY_VER(display) == 20 || display->platform.battlemage;
}
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -206,17 +206,17 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
- if (DISPLAY_VER(i915) < 12 && adjusted_mode->crtc_clock > 148500)
+ if (DISPLAY_VER(display) < 12 && adjusted_mode->crtc_clock > 148500)
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
i = 1;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
@@ -251,11 +251,11 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
/* ELD buffer size in dwords */
-static int g4x_eld_buffer_size(struct drm_i915_private *i915)
+static int g4x_eld_buffer_size(struct intel_display *display)
{
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
@@ -263,33 +263,33 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *eld = (u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
if ((tmp & G4X_ELD_VALID) == 0)
return;
- intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+ intel_de_rmw(display, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+ eld[i] = intel_de_read(display, G4X_HDMIW_HDMIEDID);
}
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID, 0);
intel_crtc_wait_for_next_vblank(crtc);
@@ -300,28 +300,28 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, eld[i]);
for (; i < eld_buffer_size; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, 0);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, 0);
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
0, G4X_ELD_VALID);
}
@@ -329,11 +329,11 @@ static void
hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Enable time stamps. Let HW calculate Maud/Naud values */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
AUD_CONFIG_UPPER_N_MASK |
@@ -347,8 +347,8 @@ static void
hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -356,7 +356,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -364,25 +364,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- drm_dbg_kms(&i915->drm, "using N %d\n", n);
+ drm_dbg_kms(display->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- drm_dbg_kms(&i915->drm, "using automatic N\n");
+ drm_dbg_kms(display->drm, "using automatic N\n");
}
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -399,14 +399,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -415,26 +415,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
/* Disable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
@@ -446,13 +446,13 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
- cdclk = i915->display.cdclk.hw.cdclk;
+ cdclk = display->cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n",
h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk);
@@ -497,19 +497,19 @@ static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
- val = intel_de_read(i915, AUD_CONFIG_BE);
+ val = intel_de_read(display, AUD_CONFIG_BE);
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
@@ -536,56 +536,58 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
- intel_de_write(i915, AUD_CONFIG_BE, val);
+ intel_de_write(display, AUD_CONFIG_BE, val);
}
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
/* Enable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
0, AUDIO_OUTPUT_ENABLE(cpu_transcoder));
intel_crtc_wait_for_next_vblank(crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ibx_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -607,21 +609,21 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -630,10 +632,10 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
@@ -643,32 +645,32 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_N_PROG_ENABLE |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK,
@@ -676,7 +678,7 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX :
audio_config_hdmi_pixel_clock(crtc_state)));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
@@ -693,14 +695,14 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
mutex_unlock(&connector->eld_mutex);
@@ -729,8 +731,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -740,26 +742,27 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
drm_eld_size(crtc_state->eld));
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_enable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_enable(encoder,
crtc_state,
conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = encoder;
BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -770,7 +773,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
+ intel_lpe_audio_notify(display, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -788,8 +791,8 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -799,24 +802,25 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name);
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_disable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = NULL;
memset(audio_state->eld, 0, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -827,36 +831,36 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
+ intel_lpe_audio_notify(display, cpu_transcoder, port, NULL, 0, false);
}
static void intel_acomp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
if (audio_state->encoder)
memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->has_audio)
return;
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_get_config(encoder, crtc_state);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
@@ -879,17 +883,19 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
/**
* intel_audio_hooks_init - Set up chip specific audio hooks
- * @i915: device private
+ * @display: display device
*/
-void intel_audio_hooks_init(struct drm_i915_private *i915)
+void intel_audio_hooks_init(struct intel_display *display)
{
- if (IS_G4X(i915))
- i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.g4x)
+ display->funcs.audio = &g4x_audio_funcs;
+ else if (display->platform.valleyview || display->platform.cherryview ||
HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
- i915->display.funcs.audio = &ibx_audio_funcs;
- else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
- i915->display.funcs.audio = &hsw_audio_funcs;
+ display->funcs.audio = &ibx_audio_funcs;
+ else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
+ display->funcs.audio = &hsw_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -897,10 +903,10 @@ struct aud_ts_cdclk_m_n {
u16 n;
};
-void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_pre(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
- intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
+ if (DISPLAY_VER(display) >= 13)
+ intel_de_rmw(display, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
}
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
@@ -909,16 +915,18 @@ static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n
aud_ts->n = cdclk * aud_ts->m / 24000;
}
-void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_post(struct intel_display *display)
{
struct aud_ts_cdclk_m_n aud_ts;
- if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
+ if (DISPLAY_VER(display) >= 13) {
+ get_aud_ts_cdclk_m_n(display->cdclk.hw.ref,
+ display->cdclk.hw.cdclk, &aud_ts);
- intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
- intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
- drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_N, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
+ drm_dbg_kms(display->drm, "aud_ts_cdclk set to M=%u, N=%u\n",
+ aud_ts.m, aud_ts.n);
}
}
@@ -943,9 +951,10 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
return drm_atomic_commit(&state->base);
}
-static void glk_force_audio_cdclk(struct drm_i915_private *i915,
+static void glk_force_audio_cdclk(struct intel_display *display,
bool enable)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct intel_crtc *crtc;
@@ -956,8 +965,8 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915,
return;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&i915->drm);
- if (drm_WARN_ON(&i915->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
state->acquire_ctx = &ctx;
@@ -972,7 +981,7 @@ retry:
goto retry;
}
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
drm_atomic_state_put(state);
@@ -983,7 +992,6 @@ retry:
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int min_cdclk = 0;
if (!crtc_state->has_audio)
@@ -1000,7 +1008,7 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) == 10) {
/* Display WA #1145: glk */
min_cdclk = max(min_cdclk, 316800);
- } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) == 9 || display->platform.broadwell) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(min_cdclk, 432000);
}
@@ -1020,14 +1028,14 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
* 270 | 320 or higher
* 162 | 200 or higher"
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
intel_crtc_has_dp_encoder(crtc_state))
min_cdclk = max(min_cdclk, crtc_state->port_clock);
return min_cdclk;
}
-static unsigned long i915_audio_component_get_power(struct device *kdev)
+static unsigned long intel_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
struct drm_i915_private *i915 = to_i915(display->drm);
@@ -1038,81 +1046,79 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (i915->display.audio.power_refcount++ == 0) {
- if (DISPLAY_VER(i915) >= 9) {
- intel_de_write(i915, AUD_FREQ_CNTRL,
- i915->display.audio.freq_cntrl);
- drm_dbg_kms(&i915->drm,
+ if (display->audio.power_refcount++ == 0) {
+ if (DISPLAY_VER(display) >= 9) {
+ intel_de_write(display, AUD_FREQ_CNTRL,
+ display->audio.freq_cntrl);
+ drm_dbg_kms(display->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- i915->display.audio.freq_cntrl);
+ display->audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, true);
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, true);
- if (DISPLAY_VER(i915) >= 10)
- intel_de_rmw(i915, AUD_PIN_BUF_CTL,
+ if (DISPLAY_VER(display) >= 10)
+ intel_de_rmw(display, AUD_PIN_BUF_CTL,
0, AUD_PIN_BUF_ENABLE);
}
return (unsigned long)wakeref;
}
-static void i915_audio_component_put_power(struct device *kdev,
- unsigned long cookie)
+static void intel_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
{
struct intel_display *display = to_intel_display(kdev);
struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref = (intel_wakeref_t)cookie;
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--i915->display.audio.power_refcount == 0)
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, false);
+ if (--display->audio.power_refcount == 0)
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, false);
intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
}
-static void i915_audio_component_codec_wake_override(struct device *kdev,
- bool enable)
+static void intel_audio_component_codec_wake_override(struct device *kdev,
+ bool enable)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
- if (DISPLAY_VER(i915) < 9)
+ if (DISPLAY_VER(display) < 9)
return;
- cookie = i915_audio_component_get_power(kdev);
+ cookie = intel_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
SKL_AUD_CODEC_WAKE_SIGNAL, 0);
usleep_range(1000, 1500);
if (enable) {
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
0, SKL_AUD_CODEC_WAKE_SIGNAL);
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev, cookie);
+ intel_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
-static int i915_audio_component_get_cdclk_freq(struct device *kdev)
+static int intel_audio_component_get_cdclk_freq(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
+ if (drm_WARN_ON_ONCE(display->drm, !HAS_DDI(display)))
return -ENODEV;
- return i915->display.cdclk.hw.cdclk;
+ return display->cdclk.hw.cdclk;
}
/*
@@ -1124,7 +1130,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
* will get the right intel_encoder with port matched
* Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
-static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+static struct intel_audio_state *find_audio_state(struct intel_display *display,
int port, int cpu_transcoder)
{
/* MST */
@@ -1132,11 +1138,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- if (drm_WARN_ON(&i915->drm,
- cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
+ if (drm_WARN_ON(display->drm,
+ cpu_transcoder >= ARRAY_SIZE(display->audio.state)))
return NULL;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1148,11 +1154,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
if (cpu_transcoder > 0)
return NULL;
- for_each_cpu_transcoder(i915, cpu_transcoder) {
+ for_each_cpu_transcoder(display, cpu_transcoder) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1163,27 +1169,27 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
return NULL;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int cpu_transcoder, int rate)
+static int intel_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int cpu_transcoder, int rate)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct i915_audio_component *acomp = display->audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
int err = 0;
- if (!HAS_DDI(i915))
+ if (!HAS_DDI(display))
return 0;
- cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&i915->display.audio.mutex);
+ cookie = intel_audio_component_get_power(kdev);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1200,26 +1206,26 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&i915->display.audio.mutex);
- i915_audio_component_put_power(kdev, cookie);
+ mutex_unlock(&display->audio.mutex);
+ intel_audio_component_put_power(kdev, cookie);
return err;
}
-static int i915_audio_component_get_eld(struct device *kdev, int port,
- int cpu_transcoder, bool *enabled,
- unsigned char *buf, int max_bytes)
+static int intel_audio_component_get_eld(struct device *kdev, int port,
+ int cpu_transcoder, bool *enabled,
+ unsigned char *buf, int max_bytes)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
- mutex_unlock(&i915->display.audio.mutex);
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
+ mutex_unlock(&display->audio.mutex);
return -EINVAL;
}
@@ -1231,71 +1237,70 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
return ret;
}
-static const struct drm_audio_component_ops i915_audio_component_ops = {
- .owner = THIS_MODULE,
- .get_power = i915_audio_component_get_power,
- .put_power = i915_audio_component_put_power,
- .codec_wake_override = i915_audio_component_codec_wake_override,
- .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
- .sync_audio_rate = i915_audio_component_sync_audio_rate,
- .get_eld = i915_audio_component_get_eld,
+static const struct drm_audio_component_ops intel_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = intel_audio_component_get_power,
+ .put_power = intel_audio_component_put_power,
+ .codec_wake_override = intel_audio_component_codec_wake_override,
+ .get_cdclk_freq = intel_audio_component_get_cdclk_freq,
+ .sync_audio_rate = intel_audio_component_sync_audio_rate,
+ .get_eld = intel_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static int intel_audio_component_bind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
int i;
- if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(display->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(&i915->drm);
- acomp->base.ops = &i915_audio_component_ops;
+ drm_modeset_lock_all(display->drm);
+ acomp->base.ops = &intel_audio_component_ops;
acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- i915->display.audio.component = acomp;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = acomp;
+ drm_modeset_unlock_all(display->drm);
return 0;
}
-static void i915_audio_component_unbind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static void intel_audio_component_unbind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- drm_modeset_lock_all(&i915->drm);
+ drm_modeset_lock_all(display->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- i915->display.audio.component = NULL;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = NULL;
+ drm_modeset_unlock_all(display->drm);
device_link_remove(hda_kdev, drv_kdev);
- if (i915->display.audio.power_refcount)
- drm_err(&i915->drm, "audio power refcount %d after unbind\n",
- i915->display.audio.power_refcount);
+ if (display->audio.power_refcount)
+ drm_err(display->drm,
+ "audio power refcount %d after unbind\n",
+ display->audio.power_refcount);
}
-static const struct component_ops i915_audio_component_bind_ops = {
- .bind = i915_audio_component_bind,
- .unbind = i915_audio_component_unbind,
+static const struct component_ops intel_audio_component_bind_ops = {
+ .bind = intel_audio_component_bind,
+ .unbind = intel_audio_component_unbind,
};
#define AUD_FREQ_TMODE_SHIFT 14
@@ -1308,8 +1313,8 @@ static const struct component_ops i915_audio_component_bind_ops = {
#define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M)
/**
- * i915_audio_component_init - initialize and register the audio component
- * @i915: i915 device instance
+ * intel_audio_component_init - initialize and register the audio component
+ * @display: display device
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
@@ -1323,93 +1328,93 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-static void i915_audio_component_init(struct drm_i915_private *i915)
+static void intel_audio_component_init(struct intel_display *display)
{
u32 aud_freq, aud_freq_init;
- if (DISPLAY_VER(i915) >= 9) {
- aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
+ if (DISPLAY_VER(display) >= 9) {
+ aud_freq_init = intel_de_read(display, AUD_FREQ_CNTRL);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
aud_freq = AUD_FREQ_GEN12;
else
aud_freq = aud_freq_init;
/* use BIOS provided value for TGL and RKL unless it is a known bad value */
- if ((IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) &&
+ if ((display->platform.tigerlake || display->platform.rocketlake) &&
aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
- drm_dbg_kms(&i915->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ drm_dbg_kms(display->drm,
+ "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- i915->display.audio.freq_cntrl = aud_freq;
+ display->audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
- intel_audio_cdclk_change_post(i915);
+ intel_audio_cdclk_change_post(display);
}
-static void i915_audio_component_register(struct drm_i915_private *i915)
+static void intel_audio_component_register(struct intel_display *display)
{
int ret;
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
+ ret = component_add_typed(display->drm->dev,
+ &intel_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
- i915->display.audio.component_registered = true;
+ display->audio.component_registered = true;
}
/**
- * i915_audio_component_cleanup - deregister the audio component
- * @i915: i915 device instance
+ * intel_audio_component_cleanup - deregister the audio component
+ * @display: display device
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-static void i915_audio_component_cleanup(struct drm_i915_private *i915)
+static void intel_audio_component_cleanup(struct intel_display *display)
{
- if (!i915->display.audio.component_registered)
+ if (!display->audio.component_registered)
return;
- component_del(i915->drm.dev, &i915_audio_component_bind_ops);
- i915->display.audio.component_registered = false;
+ component_del(display->drm->dev, &intel_audio_component_bind_ops);
+ display->audio.component_registered = false;
}
/**
* intel_audio_init() - Initialize the audio driver either using
* component framework or using lpe audio bridge
- * @i915: the i915 drm device private data
+ * @display: display device
*
*/
-void intel_audio_init(struct drm_i915_private *i915)
+void intel_audio_init(struct intel_display *display)
{
- if (intel_lpe_audio_init(i915) < 0)
- i915_audio_component_init(i915);
+ if (intel_lpe_audio_init(display) < 0)
+ intel_audio_component_init(display);
}
-void intel_audio_register(struct drm_i915_private *i915)
+void intel_audio_register(struct intel_display *display)
{
- if (!i915->display.audio.lpe.platdev)
- i915_audio_component_register(i915);
+ if (!display->audio.lpe.platdev)
+ intel_audio_component_register(display);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
- * @i915: the i915 drm device private data
- *
+ * @display: display device
*/
-void intel_audio_deinit(struct drm_i915_private *i915)
+void intel_audio_deinit(struct intel_display *display)
{
- if (i915->display.audio.lpe.platdev != NULL)
- intel_lpe_audio_teardown(i915);
+ if (display->audio.lpe.platdev)
+ intel_lpe_audio_teardown(display);
else
- i915_audio_component_cleanup(i915);
+ intel_audio_component_cleanup(display);
}
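For context, and not part of the patch itself: once intel_audio_component_bind() has published the ops table, the sound driver reaches these callbacks through the shared struct drm_audio_component. A minimal consumer-side sketch, assuming a hypothetical helper that returns the bound component:

	struct drm_audio_component *acomp = get_bound_acomp();	/* hypothetical helper */
	unsigned long cookie;

	cookie = acomp->ops->get_power(acomp->dev);	/* ends up in intel_audio_component_get_power() */
	/* ... query the ELD via acomp->ops->get_eld(), program the codec ... */
	acomp->ops->put_power(acomp->dev, cookie);	/* drop the display power reference */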
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 1bafc155434a..ad49eefa7182 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -9,11 +9,11 @@
#include <linux/types.h>
struct drm_connector_state;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
-void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+void intel_audio_hooks_init(struct intel_display *display);
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
@@ -25,12 +25,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state);
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
-void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+void intel_audio_cdclk_change_pre(struct intel_display *display);
+void intel_audio_cdclk_change_post(struct intel_display *display);
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_register(struct drm_i915_private *i915);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
+void intel_audio_init(struct intel_display *display);
+void intel_audio_register(struct intel_display *display);
+void intel_audio_deinit(struct intel_display *display);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index fc1e517e074a..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -41,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
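The rest of scale() is not shown in this hunk, but it is a linear remap that divides by (source_max - source_min), which is presumably why the new guard uses >= for the source range (avoiding a division by zero) while keeping > for the target range. A rough sketch of that remap, under that assumption:

	/* sketch: linear remap of source_val into [target_min, target_max] */
	target_val = mul_u32_u32(source_val - source_min, target_max - target_min);
	do_div(target_val, source_max - source_min);	/* safe: source_max > source_min */
	return target_val + target_min;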
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c7a603589412..b520231833b7 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2250,7 +2250,7 @@ static void bxt_sanitize_cdclk(struct intel_display *display)
/*
* Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
+ * dividers both syncing to an active pipe, or asynchronously
* (PIPE_NONE).
*/
cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE);
@@ -2521,7 +2521,6 @@ static void intel_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe, const char *context)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config))
@@ -2538,7 +2537,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_pause(intel_dp);
}
- intel_audio_cdclk_change_pre(dev_priv);
+ intel_audio_cdclk_change_pre(display);
/*
* Lock aux/gmbus while we change cdclk in case those
@@ -2568,7 +2567,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_resume(intel_dp);
}
- intel_audio_cdclk_change_post(dev_priv);
+ intel_audio_cdclk_change_post(display);
if (drm_WARN(display->drm,
intel_cdclk_changed(&display->cdclk.hw, cdclk_config),
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
new file mode 100644
index 000000000000..6b6fb82009f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_crtc.h"
+#include "intel_cmtg.h"
+#include "intel_cmtg_regs.h"
+#include "intel_de.h"
+#include "intel_display_device.h"
+#include "intel_display_power.h"
+
+/**
+ * DOC: Common Primary Timing Generator (CMTG)
+ *
+ * The CMTG is a timing generator that runs in parallel to the transcoders'
+ * timing generators (TGs) to provide a synchronization mechanism where the
+ * CMTG acts as primary and the transcoders' TGs act as secondaries. The CMTG
+ * outputs its TG start and frame sync signals to the transcoders that are
+ * configured as secondary, which use those signals to synchronize their own
+ * timing with the CMTG's.
+ *
+ * The CMTG can be used only with eDP or MIPI command mode and supports the
+ * following use cases:
+ *
+ * - Dual eDP: The CMTG can be used to keep two eDP TGs in sync when on a
+ * dual eDP configuration (with or without PSR/PSR2 enabled).
+ *
+ * - Single eDP as secondary: It is also possible to use a single eDP
+ * configuration with the transcoder TG as secondary to the CMTG. That allows
+ * later adding a second eDP for a dual eDP configuration with CMTG without
+ * requiring a modeset on the already enabled eDP.
+ *
+ * - DC6v: In DC6v, the transcoder might be off but the CMTG keeps running to
+ * maintain frame timings. When exiting DC6v, the transcoder TG is then
+ * synced back to the CMTG.
+ *
+ * Currently, the driver does not use the CMTG, but we need to make sure that
+ * we disable it in case we inherit a display configuration with it enabled.
+ */
+
+/*
+ * We describe here only the minimum data required to allow us to properly
+ * disable the CMTG if necessary.
+ */
+struct intel_cmtg_config {
+ bool cmtg_a_enable;
+ /*
+ * Xe2_LPD adds a second CMTG that can be used for dual eDP async mode.
+ */
+ bool cmtg_b_enable;
+ bool trans_a_secondary;
+ bool trans_b_secondary;
+};
+
+static bool intel_cmtg_has_cmtg_b(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 20;
+}
+
+static bool intel_cmtg_has_clock_sel(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 14;
+}
+
+static void intel_cmtg_dump_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ drm_dbg_kms(display->drm,
+ "CMTG readout: CMTG A: %s, CMTG B: %s, Transcoder A secondary: %s, Transcoder B secondary: %s\n",
+ str_enabled_disabled(cmtg_config->cmtg_a_enable),
+ intel_cmtg_has_cmtg_b(display) ? str_enabled_disabled(cmtg_config->cmtg_b_enable) : "n/a",
+ str_yes_no(cmtg_config->trans_a_secondary),
+ str_yes_no(cmtg_config->trans_b_secondary));
+}
+
+static bool intel_cmtg_transcoder_is_secondary(struct intel_display *display,
+ enum transcoder trans)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 val = 0;
+
+ if (!HAS_TRANSCODER(display, trans))
+ return false;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(trans);
+
+ with_intel_display_power_if_enabled(i915, power_domain, wakeref)
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, trans));
+
+ return val & CMTG_SECONDARY_MODE;
+}
+
+static void intel_cmtg_get_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 val;
+
+ val = intel_de_read(display, TRANS_CMTG_CTL_A);
+ cmtg_config->cmtg_a_enable = val & CMTG_ENABLE;
+
+ if (intel_cmtg_has_cmtg_b(display)) {
+ val = intel_de_read(display, TRANS_CMTG_CTL_B);
+ cmtg_config->cmtg_b_enable = val & CMTG_ENABLE;
+ }
+
+ cmtg_config->trans_a_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_A);
+ cmtg_config->trans_b_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_B);
+}
+
+static bool intel_cmtg_disable_requires_modeset(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ if (DISPLAY_VER(display) >= 20)
+ return false;
+
+ return cmtg_config->trans_a_secondary || cmtg_config->trans_b_secondary;
+}
+
+static void intel_cmtg_disable(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 clk_sel_clr = 0;
+ u32 clk_sel_set = 0;
+
+ if (cmtg_config->trans_a_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_A),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->trans_b_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_B),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->cmtg_a_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG A\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_A, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_A_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_A_DISABLED;
+ }
+
+ if (cmtg_config->cmtg_b_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG B\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_B, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_B_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_B_DISABLED;
+ }
+
+ if (intel_cmtg_has_clock_sel(display) && clk_sel_clr)
+ intel_de_rmw(display, CMTG_CLK_SEL, clk_sel_clr, clk_sel_set);
+}
+
+/*
+ * Read out CMTG configuration and, on platforms that allow disabling it without
+ * a modeset, do it.
+ *
+ * This function must be called before any port PLL is disabled in the general
+ * sanitization process, because whatever port PLL is providing the CMTG clock
+ * must still be on when we access the CMTG registers.
+ */
+void intel_cmtg_sanitize(struct intel_display *display)
+{
+ struct intel_cmtg_config cmtg_config = {};
+
+ if (!HAS_CMTG(display))
+ return;
+
+ intel_cmtg_get_config(display, &cmtg_config);
+ intel_cmtg_dump_config(display, &cmtg_config);
+
+ /*
+ * FIXME: The driver is not prepared to handle cases where a modeset is
+ * required for disabling the CMTG: we need a proper way of tracking
+ * CMTG state and do the right synchronization with respect to triggering
+ * the modeset as part of the disable sequence.
+ */
+ if (intel_cmtg_disable_requires_modeset(display, &cmtg_config))
+ return;
+
+ intel_cmtg_disable(display, &cmtg_config);
+}
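The call site for intel_cmtg_sanitize() is outside this section; going by the ordering comment above, it is expected to run from the hardware state takeover path before any port PLL is shut down. Illustrative ordering only, the surrounding steps are assumptions rather than code from this patch:

	/* during HW state readout/sanitization, while BIOS-enabled PLLs still run */
	intel_cmtg_sanitize(display);
	/* ... only afterwards sanitize encoders and disable unused port PLLs ... */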
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.h b/drivers/gpu/drm/i915/display/intel_cmtg.h
new file mode 100644
index 000000000000..ba62199adaa2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_H__
+#define __INTEL_CMTG_H__
+
+struct intel_display;
+
+void intel_cmtg_sanitize(struct intel_display *display);
+
+#endif /* __INTEL_CMTG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
new file mode 100644
index 000000000000..668e41d65e86
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_REGS_H__
+#define __INTEL_CMTG_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define CMTG_CLK_SEL _MMIO(0x46160)
+#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
+#define CMTG_CLK_SEL_A_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_A_MASK, 0)
+#define CMTG_CLK_SEL_B_MASK REG_GENMASK(15, 13)
+#define CMTG_CLK_SEL_B_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_B_MASK, 0)
+
+#define TRANS_CMTG_CTL_A _MMIO(0x6fa88)
+#define TRANS_CMTG_CTL_B _MMIO(0x6fb88)
+#define CMTG_ENABLE REG_BIT(31)
+
+#endif /* __INTEL_CMTG_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 2f51eccdb27a..8400a97f7e43 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -998,7 +998,7 @@ static void skl_color_commit_noarm(struct intel_dsb *dsb,
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
- * register programming will be peformed from skl_color_commit_arm()
+ * register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4634d3fd9f20..bc724dc5b4ea 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -745,8 +745,10 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
transconf | TRANSCONF_FORCE_BORDER);
intel_de_posting_read(display,
TRANSCONF(display, cpu_transcoder));
- /* Wait for next Vblank to substitue
- * border color for Color info */
+ /*
+ * Wait for next Vblank to substitute
+ * border color for Color info.
+ */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
st00 = intel_de_read8(display, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index c910168602d2..e69b28779ac5 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -96,7 +96,7 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
- * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
+ * From Gen 11, in case of dsi cmd mode, frame counter wouldn't
* have updated at the beginning of TE, if we want to use
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
@@ -714,7 +714,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
- intel_vrr_send_push(new_crtc_state);
+ intel_vrr_send_push(NULL, new_crtc_state);
local_irq_enable();
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 1faef60be472..ecd0d9853c60 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -10,6 +10,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -248,11 +249,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
str_enabled_disabled(pipe_config->has_sel_update),
str_enabled_disabled(pipe_config->has_panel_replay),
str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "minimum HBlank: %d\n", pipe_config->min_hblank);
}
- drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
-
drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
@@ -286,13 +285,23 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ drm_printf(&p, "scanline offset: %d\n",
+ intel_crtc_scanline_offset(pipe_config));
+
+ drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->hw.adjusted_mode.crtc_vblank_start -
+ pipe_config->hw.adjusted_mode.crtc_vdisplay,
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
+ drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d, vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
+ pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
+
+ drm_printf(&p, "vrr: vmin vblank: %d, vmax vblank: %d, vmin vtotal: %d, vmax vtotal: %d\n",
+ intel_vrr_vmin_vblank_start(pipe_config), intel_vrr_vmax_vblank_start(pipe_config),
+ intel_vrr_vmin_vtotal(pipe_config), intel_vrr_vmax_vtotal(pipe_config));
drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 57cf8f46a458..48c3d212f690 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -680,7 +680,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
* CURPOS.
*
* On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additonally
+ * CURBASE write to arm the update. Additionally
* a write to any of the cursor register will cancel
* an already armed cursor update. Thus leaving out
* the CURBASE write after CURPOS could lead to a
@@ -865,7 +865,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(new_plane_state);
+ ret = intel_plane_pin_fb(new_plane_state, old_plane_state);
if (ret)
goto out_free;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index e768dc6a15b3..48b0b9755b2b 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -18,6 +18,7 @@
#include "intel_hdmi.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
#define MB_WRITE_COMMITTED true
@@ -2003,19 +2004,6 @@ static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
NULL,
};
-static int intel_c10_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
-}
-
static const struct intel_c10pll_state * const *
intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2033,21 +2021,27 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
return NULL;
}
-static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
+static void intel_cx0pll_update_ssc(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
- int i;
if (intel_crtc_has_dp_encoder(crtc_state)) {
if (intel_panel_use_ssc(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
pll_state->ssc_enabled =
(intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
}
}
+}
+
+static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
+ int i;
if (pll_state->ssc_enabled)
return;
@@ -2070,6 +2064,7 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
+ intel_cx0pll_update_ssc(crtc_state, encoder);
intel_c10pll_update_pll(crtc_state, encoder);
crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
@@ -2077,6 +2072,16 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
}
}
+ /* For HDMI PLLs, try the SNPS PHY algorithm if there are no precomputed tables */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ intel_snps_hdmi_pll_compute_c10pll(&crtc_state->dpll_hw_state.cx0pll.c10,
+ crtc_state->port_clock);
+ intel_c10pll_update_pll(crtc_state, encoder);
+ crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
+
+ return 0;
+ }
+
return -EINVAL;
}
@@ -2173,9 +2178,47 @@ static void intel_c10pll_dump_hw_state(struct intel_display *display,
i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
}
-static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+/*
+ * Some ARL SoCs share the same drm PCI IDs, so we need a helper that differentiates
+ * them based on the host bridge device ID in order to pick the correct tx_misc value.
+ */
+static bool is_arrowlake_s_by_host_bridge(void)
+{
+ struct pci_dev *pdev = NULL;
+ u16 host_bridge_pci_dev_id = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, pdev)))
+ host_bridge_pci_dev_id = pdev->device;
+
+ return IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id);
+}
+
+static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ u16 tx_misc;
+ u16 tx_dcc_cal_dac_ctrl_range = 8;
+ u16 tx_term_ctrl = 2;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tx_misc = 5;
+ tx_term_ctrl = 4;
+ } else if (display->platform.battlemage) {
+ tx_misc = 0;
+ } else if (display->platform.meteorlake_u ||
+ is_arrowlake_s_by_host_bridge()) {
+ tx_misc = 3;
+ } else {
+ tx_misc = 7;
+ }
+
+ return (C20_PHY_TX_MISC(tx_misc) |
+ C20_PHY_TX_DCC_CAL_RANGE(tx_dcc_cal_dac_ctrl_range) |
+ C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl));
+}
+
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+{
struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
u64 datarate;
u64 mpll_tx_clk_div;
@@ -2185,7 +2228,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
u64 mpll_multiplier;
u64 mpll_fracn_quot;
u64 mpll_fracn_rem;
- u16 tx_misc;
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
@@ -2205,11 +2247,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)),
datarate), 255);
- if (DISPLAY_VER(display) >= 20)
- tx_misc = 0x5;
- else
- tx_misc = 0x0;
-
if (vco_freq <= DATARATE_3000000000)
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3;
else if (vco_freq <= DATARATE_3500000000)
@@ -2221,7 +2258,7 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc);
+ pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
@@ -2249,31 +2286,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_c20_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- if (clock >= 25175 && clock <= 594000)
- return MODE_OK;
-
- return MODE_CLOCK_RANGE;
-}
-
-int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
-{
- struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
-
- if (intel_encoder_is_c10phy(&dig_port->base))
- return intel_c10_phy_check_hdmi_link_rate(clock);
- return intel_c20_phy_check_hdmi_link_rate(clock);
-}
-
static const struct intel_c20pll_state * const *
intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2322,6 +2334,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
+ intel_cx0pll_update_ssc(crtc_state, encoder);
crtc_state->dpll_hw_state.cx0pll.use_c10 = false;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 711168882684..573fa7d3e88f 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -41,7 +41,6 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index da154ff26b96..4a3cf08007e3 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -218,10 +218,34 @@
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
+#define C10_PLL0_SSC_EN REG_BIT8(0)
+#define C10_PLL0_DIVCLK_EN REG_BIT8(1)
+#define C10_PLL0_DIV5CLK_EN REG_BIT8(2)
+#define C10_PLL0_WORDDIV2_EN REG_BIT8(3)
#define C10_PLL0_FRACEN REG_BIT8(4)
+#define C10_PLL0_PMIX_EN REG_BIT8(5)
+#define C10_PLL0_ANA_FREQ_VCO_MASK REG_GENMASK8(7, 6)
+#define C10_PLL1_DIV_MULTIPLIER_MASK REG_GENMASK8(7, 0)
+#define C10_PLL2_MULTIPLIERL_MASK REG_GENMASK8(7, 0)
#define C10_PLL3_MULTIPLIERH_MASK REG_GENMASK8(3, 0)
+#define C10_PLL8_SSC_UP_SPREAD REG_BIT8(5)
+#define C10_PLL9_FRACN_DENL_MASK REG_GENMASK8(7, 0)
+#define C10_PLL10_FRACN_DENH_MASK REG_GENMASK8(7, 0)
+#define C10_PLL11_FRACN_QUOT_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL12_FRACN_QUOT_H_MASK REG_GENMASK8(7, 0)
+#define C10_PLL13_FRACN_REM_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL14_FRACN_REM_H_MASK REG_GENMASK8(7, 0)
#define C10_PLL15_TXCLKDIV_MASK REG_GENMASK8(2, 0)
#define C10_PLL15_HDMIDIV_MASK REG_GENMASK8(5, 3)
+#define C10_PLL15_PIXELCLKDIV_MASK REG_GENMASK8(7, 6)
+#define C10_PLL16_ANA_CPINT REG_GENMASK8(6, 0)
+#define C10_PLL16_ANA_CPINTGS_L REG_BIT8(7)
+#define C10_PLL17_ANA_CPINTGS_H_MASK REG_GENMASK8(5, 0)
+#define C10_PLL17_ANA_CPPROP_L_MASK REG_GENMASK8(7, 6)
+#define C10_PLL18_ANA_CPPROP_H_MASK REG_GENMASK8(4, 0)
+#define C10_PLL18_ANA_CPPROPGS_L_MASK REG_GENMASK8(7, 5)
+#define C10_PLL19_ANA_CPPROPGS_H_MASK REG_GENMASK8(3, 0)
+#define C10_PLL19_ANA_V2I_MASK REG_GENMASK8(5, 4)
#define PHY_C10_VDR_CMN(idx) (0xC20 + (idx))
#define C10_CMN0_REF_RANGE REG_FIELD_PREP(REG_GENMASK(4, 0), 1)
@@ -298,6 +322,12 @@
#define C20_PHY_TX_RATE REG_GENMASK(2, 0)
#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0)
#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val))
+#define C20_PHY_TX_DCC_CAL_RANGE_MASK REG_GENMASK16(11, 8)
+#define C20_PHY_TX_DCC_CAL_RANGE(val) \
+ REG_FIELD_PREP16(C20_PHY_TX_DCC_CAL_RANGE_MASK, (val))
+#define C20_PHY_TX_DCC_BYPASS REG_BIT(12)
+#define C20_PHY_TX_TERM_CTL_MASK REG_GENMASK16(15, 13)
+#define C20_PHY_TX_TERM_CTL(val) REG_FIELD_PREP16(C20_PHY_TX_TERM_CTL_MASK, (val))
#define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx))
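As a consistency check on the new intel_c20_hdmi_tmds_tx_cgf_1() helper above: for DISPLAY_VER >= 20 it selects tx_misc = 5, tx_dcc_cal_dac_ctrl_range = 8 and tx_term_ctrl = 4, so with these field definitions the composed word works out to:

	  C20_PHY_TX_MISC(5)		/* bits  7:0  -> 0x0005 */
	| C20_PHY_TX_DCC_CAL_RANGE(8)	/* bits 11:8  -> 0x0800 */
	| C20_PHY_TX_DCC_BYPASS		/* bit  12    -> 0x1000 */
	| C20_PHY_TX_TERM_CTL(4)	/* bits 15:13 -> 0x8000 */
					/*      total =  0x9805 */

which matches the value the old code produced for those platforms (0x9800 | C20_PHY_TX_MISC(0x5)); only the pre-Xe2 programming actually changes.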
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index acb986bc1f33..dc319f37b1be 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2121,9 +2121,20 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
}
static void
+tgl_dkl_phy_check_and_rewrite(struct drm_i915_private *dev_priv,
+ enum tc_port tc_port, u32 ln0, u32 ln1)
+{
+ if (ln0 != intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 0)))
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0);
+ if (ln1 != intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 1)))
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1);
+}
+
+static void
icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
u32 ln0, ln1, pin_assignment;
@@ -2201,6 +2212,10 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
if (DISPLAY_VER(dev_priv) >= 12) {
intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0);
intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1);
+ /* WA_14018221282 */
+ if (IS_DISPLAY_VER(display, 12, 13))
+ tgl_dkl_phy_check_and_rewrite(dev_priv, tc_port, ln0, ln1);
+
} else {
intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
@@ -2927,8 +2942,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
/* Panel replay has to be enabled in sink dpcd before link training. */
- if (crtc_state->has_panel_replay)
- intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
+ intel_psr_panel_replay_enable_sink(enc_to_intel_dp(encoder));
if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2985,7 +2999,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
* - crtc_state will be the state of the first stream to be activated on this
* port, and it may not be the same stream that will be deactivated last, but
* each stream should have a state that is identical when it comes to the DP
- * link parameteres
+ * link parameters.
*/
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3285,7 +3299,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
* be deactivated on this port, and it may not be the same
* stream that was activated last, but each stream
* should have a state that is identical when it comes to
- * the DP link parameteres
+ * the DP link parameters
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 4271da219b41..60867b5b03ec 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1013,7 +1013,7 @@ static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
{
if (DISPLAY_VER(i915) == 9) {
/*
- * "Plane N strech max must be programmed to 11b (x1)
+ * "Plane N stretch max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
@@ -2610,28 +2610,71 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state)
+static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay &&
IS_DISPLAY_VER(display, 13, 14);
}
-static int intel_crtc_compute_config(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int vblank_delay = 0;
+
+ if (!HAS_DSB(display))
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_crtc_needs_wa_14015401596(crtc_state))
+ vblank_delay = max(vblank_delay, 1);
+
+ /*
+ * Add a minimal vblank delay to make sure the push
+ * doesn't race with the "wait for safe window" used
+ * for frame completion with DSB.
+ */
+ if (intel_vrr_possible(crtc_state))
+ vblank_delay = max(vblank_delay, 1);
+
+ return vblank_delay;
+}
+
+static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int vblank_delay, max_vblank_delay;
+
+ vblank_delay = intel_crtc_vblank_delay(crtc_state);
+ max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
+
+ if (vblank_delay > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay);
+ return -EINVAL;
+ }
+
+ adjusted_mode->crtc_vblank_start += vblank_delay;
+
+ return 0;
+}
+
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- adjusted_mode->crtc_vblank_start += 1;
+ ret = intel_crtc_compute_vblank_delay(state, crtc);
+ if (ret)
+ return ret;
ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
@@ -2802,6 +2845,8 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+
/* We need to be careful not to changed the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
@@ -2883,12 +2928,30 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
- drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ intel_de_write(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
/*
* The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
@@ -3529,7 +3592,7 @@ static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
/*
- * We currently do not free assignements of panel fitters on
+ * We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now.
*/
@@ -4229,7 +4292,7 @@ int intel_dotclock_calculate(int link_freq,
/*
* The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
- * But we want to avoid losing precison if possible, so:
+ * But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and for link freq (10kbs units) -> pixel clock it is:
@@ -5303,6 +5366,19 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
intel_cx0pll_dump_hw_state(display, b);
}
+static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ /*
+ * Allow fastboot to fix up vblank delay (handled via LRR
+ * codepaths), a bit dodgy as the registers aren't
+ * double buffered but seems to be working more or less...
+ */
+ return HAS_LRR(display) && old_crtc_state->inherited &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
@@ -5435,7 +5511,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
- PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ if (!fastset || !allow_vblank_delay_fastset(current_config)) \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
if (!fastset || !pipe_config->update_lrr) { \
@@ -5638,20 +5715,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
- /*
- * Panel replay has to be enabled before link training. PSR doesn't have
- * this requirement -> check these only if using panel replay
- */
- if (current_config->active_planes &&
- (current_config->has_panel_replay ||
- pipe_config->has_panel_replay)) {
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_sel_update);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
- PIPE_CONF_CHECK_BOOL(has_panel_replay);
- }
-
PIPE_CONF_CHECK_BOOL(double_wide);
if (dev_priv->display.dpll.mgr)
@@ -5689,10 +5752,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
- if (!fastset)
+ if (!fastset) {
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
+ }
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
- PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -6026,6 +6090,14 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
+static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
+ const struct drm_display_mode *new_adjusted_mode)
+{
+ return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
+ old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
+}
+
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
@@ -6036,18 +6108,21 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
- if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
crtc->base.base.id, crtc->base.name);
- else
+ } else {
+ if (allow_vblank_delay_fastset(old_crtc_state))
+ new_crtc_state->update_lrr = true;
new_crtc_state->uapi.mode_changed = false;
+ }
if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
- old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
+ if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
+ &new_crtc_state->hw.adjusted_mode))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -6359,7 +6434,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
* the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
- * generated and the requested events are sent to the usersapce in the interrupt
+ * generated and the requested events are sent to the userspace in the interrupt
* handler itself. The timestamp and sequence sent during the flip done event
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
@@ -6493,36 +6568,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- /*
- * FIXME: This check is kept generic for all platforms.
- * Need to verify this for all gen9 platforms to enable
- * this selectively if required.
- */
- switch (new_plane_state->hw.fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- /*
- * FIXME: Async on Linear buffer is supported on ICL as
- * but with additional alignment and fbc restrictions
- * need to be taken care of. These aren't applicable for
- * gen12+.
- */
- if (DISPLAY_VER(i915) < 12) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
- plane->base.base.id, plane->base.name,
- new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
- return -EINVAL;
- }
- break;
-
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_4_TILED:
- case I915_FORMAT_MOD_4_TILED_BMG_CCS:
- case I915_FORMAT_MOD_4_TILED_LNL_CCS:
- break;
- default:
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
@@ -6530,7 +6576,8 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
return -EINVAL;
}
- if (new_plane_state->hw.fb->format->num_planes > 1) {
+ if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
+ new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] Planar formats do not support async flips\n",
plane->base.base.id, plane->base.name);
@@ -6576,6 +6623,14 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
return -EINVAL;
}
+ if (skl_plane_aux_dist(old_plane_state, 0) !=
+ skl_plane_aux_dist(new_plane_state, 0)) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
drm_dbg_kms(&i915->drm,
@@ -7640,7 +7695,6 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
/* FIXME deal with everything */
new_crtc_state->use_dsb =
new_crtc_state->update_planes &&
- !new_crtc_state->vrr.enable &&
!new_crtc_state->do_async_flip &&
!new_crtc_state->has_psr &&
!new_crtc_state->scaler_state.scaler_users &&
@@ -7681,6 +7735,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
state, crtc);
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+
if (!new_crtc_state->dsb_color_vblank) {
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
@@ -8728,5 +8784,5 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
- return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(i915, 6, 11) && i915_vtd_active(i915);
}
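To make the bound enforced in intel_crtc_compute_vblank_delay() above concrete (illustrative numbers only): with crtc_vdisplay = crtc_vblank_start = 2160 and crtc_vblank_end = 2222,

	max_vblank_delay = 2222 - 2160 - 1;	/* = 61 */

the delays requested here are at most 1, which easily passes the check and moves crtc_vblank_start to 2161, so the "vblank delay: %d" line in the new state dump would report 1.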
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index f1d76484025a..926f09c35084 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -940,7 +940,7 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
/*
* Actually TGL can drive LPSP on port till DDI_C
* but there is no physical connected DDI_C on TGL sku's,
- * even driver is not initilizing DDI_C port for gen12.
+ * even driver is not initializing DDI_C port for gen12.
*/
lpsp_capable = encoder->port <= PORT_B;
else if (DISPLAY_VER(i915) == 11)
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 68cb7f9b9ef3..738ae522c8f4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1357,6 +1357,12 @@ static const struct intel_display_device_info xe2_hpd_display = {
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
+static const u16 mtl_u_ids[] = {
+ INTEL_MTL_U_IDS(ID),
+ INTEL_ARL_U_IDS(ID),
+ 0
+};
+
/*
* Do not initialize the .info member of the platform desc for GMD ID based
* platforms. Their display will be probed automatically based on the IP version
@@ -1364,6 +1370,13 @@ static const struct intel_display_device_info xe2_hpd_display = {
*/
static const struct platform_desc mtl_desc = {
PLATFORM(meteorlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(meteorlake, u),
+ .pciidlist = mtl_u_ids,
+ },
+ {},
+ }
};
static const struct platform_desc lnl_desc = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 9a333d9e6601..fc33791f02b9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -96,6 +96,7 @@ struct pci_dev;
func(dg2_g12) \
/* Display ver 14 (based on GMD ID) */ \
func(meteorlake) \
+ func(meteorlake_u) \
/* Display ver 20 (based on GMD ID) */ \
func(lunarlake) \
/* Display ver 14.1 (based on GMD ID) */ \
@@ -145,6 +146,7 @@ struct intel_display_platforms {
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
@@ -233,6 +235,17 @@ struct intel_display_platforms {
(drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
+#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
+#define ARLS_HOST_BRIDGE_PCI_ID2 0x7D2D
+#define ARLS_HOST_BRIDGE_PCI_ID3 0x7D2E
+#define ARLS_HOST_BRIDGE_PCI_ID4 0x7D2F
+
+#define IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(id) \
+ (((id) == ARLS_HOST_BRIDGE_PCI_ID1) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID2) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID3) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID4))
+
struct intel_display_runtime_info {
struct intel_display_ip_ver {
u16 ver;
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 50ec0c3c7588..c4120a834698 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -197,7 +197,7 @@ void intel_display_driver_early_probe(struct intel_display *display)
intel_dkl_phy_init(i915);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
- intel_audio_hooks_init(i915);
+ intel_audio_hooks_init(display);
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
@@ -544,11 +544,11 @@ void intel_display_driver_register(struct intel_display *display)
intel_opregion_register(display);
intel_acpi_video_register(display);
- intel_audio_init(i915);
+ intel_audio_init(display);
intel_display_driver_enable_user_access(display);
- intel_audio_register(i915);
+ intel_audio_register(display);
intel_display_debugfs_register(i915);
@@ -636,8 +636,6 @@ void intel_display_driver_remove_nogem(struct intel_display *display)
void intel_display_driver_unregister(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -652,7 +650,7 @@ void intel_display_driver_unregister(struct intel_display *display)
intel_display_driver_disable_user_access(display);
- intel_audio_deinit(i915);
+ intel_audio_deinit(display);
drm_atomic_helper_shutdown(display->drm);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 069043f9d894..d9734fcd0d45 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -15,6 +15,7 @@
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
@@ -25,6 +26,46 @@
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_uncore.h"
+
+static void
+intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_reset(to_intel_uncore(display->drm), regs);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
+{
+ intel_dmc_wl_get(display, reg);
+
+ gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);
+
+ intel_dmc_wl_put(display, reg);
+}
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -44,6 +85,7 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -56,8 +98,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
- intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
+ intel_de_write(display, DEIMR, dev_priv->irq_mask);
+ intel_de_posting_read(display, DEIMR);
}
}
@@ -80,6 +122,7 @@ void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
u32 old_val;
@@ -90,15 +133,15 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
+ intel_de_posting_read(display, GEN8_DE_PORT_IMR);
}
}
@@ -113,6 +156,7 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -128,9 +172,8 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
dev_priv->display.irq.de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
- dev_priv->display.irq.de_irq_mask[pipe]);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
+ intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -156,7 +199,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
+ struct intel_display *display = &dev_priv->display;
+ u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -168,8 +212,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
- intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
+ intel_de_write(display, SDEIMR, sdeimr);
+ intel_de_posting_read(display, SDEIMR);
}
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
@@ -229,6 +273,7 @@ out:
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 status_mask)
{
+ struct intel_display *display = &dev_priv->display;
i915_reg_t reg = PIPESTAT(dev_priv, pipe);
u32 enable_mask;
@@ -245,13 +290,14 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 status_mask)
{
+ struct intel_display *display = &dev_priv->display;
i915_reg_t reg = PIPESTAT(dev_priv, pipe);
u32 enable_mask;
@@ -268,8 +314,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
@@ -373,55 +419,58 @@ static void flip_done_handler(struct drm_i915_private *i915,
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
+
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
+
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
+ intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
u32 res1, res2;
if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
else
res1 = 0;
if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
res1, res2);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- intel_uncore_write(&dev_priv->uncore,
- PIPESTAT(dev_priv, pipe),
- PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_write(display,
+ PIPESTAT(dev_priv, pipe),
+ PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
}
@@ -430,6 +479,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -474,7 +524,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(dev_priv, pipe);
- pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
+ pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
@@ -487,8 +537,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
+ intel_de_write(display, reg, pipe_stats[pipe]);
+ intel_de_write(display, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -605,7 +655,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -624,7 +674,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
+ struct intel_display *display = &dev_priv->display;
+ u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
@@ -642,12 +693,13 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
}
}
- intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
+ intel_de_write(display, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
+ struct intel_display *display = &dev_priv->display;
+ u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -657,7 +709,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
- intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
+ intel_de_write(display, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -691,7 +743,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
@@ -732,7 +784,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ u32 pch_iir = intel_de_read(display, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -740,7 +792,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
}
if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
@@ -766,8 +818,7 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore,
- EDP_PSR_IIR, 0, 0);
+ psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
intel_psr_irq_handler(intel_dp, psr_iir);
break;
}
@@ -789,12 +840,12 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ u32 pch_iir = intel_de_read(display, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
}
}
@@ -925,8 +976,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & XELPDP_RM_TIMEOUT) {
- u32 val = intel_uncore_read(&dev_priv->uncore,
- RM_TIMEOUT_REG_CAPTURE);
+ u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
@@ -949,7 +999,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
+ psr_iir = intel_de_rmw(display, iir_reg, 0, 0);
if (psr_iir)
found = true;
@@ -969,6 +1019,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
u32 te_trigger)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -978,8 +1029,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* Incase of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -991,7 +1041,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -1000,8 +1050,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1021,7 +1070,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
@@ -1034,10 +1083,11 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
+ struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
- *pch_iir = intel_de_read(i915, SDEIIR);
+ *pch_iir = intel_de_read(display, SDEIIR);
if (!*pch_iir)
return;
@@ -1049,15 +1099,15 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
if (*pch_iir & SDE_PICAINTERRUPT) {
drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
- pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
- *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
- intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
+ pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
+ *pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
+ intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
}
- intel_de_write(i915, SDEIIR, *pch_iir);
+ intel_de_write(display, SDEIIR, *pch_iir);
if (pica_ier)
- intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
+ intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
@@ -1069,9 +1119,9 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
+ iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
+ intel_de_write(display, GEN8_DE_MISC_IIR, iir);
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
drm_err_ratelimited(&dev_priv->drm,
@@ -1080,9 +1130,9 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
+ iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
+ intel_de_write(display, GEN11_DE_HPD_IIR, iir);
gen11_hpd_irq_handler(dev_priv, iir);
} else {
drm_err_ratelimited(&dev_priv->drm,
@@ -1091,11 +1141,11 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
+ iir = intel_de_read(display, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
+ intel_de_write(display, GEN8_DE_PORT_IIR, iir);
if (iir & gen8_de_port_aux_mask(dev_priv)) {
intel_dp_aux_irq_handler(display);
@@ -1148,14 +1198,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err_ratelimited(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
@@ -1221,14 +1271,15 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
+ struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
- iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
+ iir = intel_de_read(display, GEN11_GU_MISC_IIR);
if (likely(iir))
- intel_de_write(i915, GEN11_GU_MISC_IIR, iir);
+ intel_de_write(display, GEN11_GU_MISC_IIR, iir);
return iir;
}
@@ -1243,6 +1294,7 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
u32 disp_ctl;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1250,17 +1302,18 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
*/
- disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);
+ disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
gen8_de_irq_handler(i915, disp_ctl);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
lockdep_assert_held(&i915->drm.vblank_time_lock);
/*
@@ -1270,15 +1323,18 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* only when vblank/CRC interrupts are actually enabled.
*/
if (i915->display.irq.vblank_enabled++ == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
lockdep_assert_held(&i915->drm.vblank_time_lock);
if (--i915->display.irq.vblank_enabled == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
@@ -1398,7 +1454,7 @@ void ilk_disable_vblank(struct drm_crtc *crtc)
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_display *display = to_intel_display(intel_crtc);
enum port port;
if (!(intel_crtc->mode_flags &
@@ -1411,10 +1467,9 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
- enable ? 0 : DSI_TE_EVENT);
+ intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
return true;
}
@@ -1483,19 +1538,19 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
if (IS_CHERRYVIEW(dev_priv))
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
i9xx_pipestat_irq_reset(dev_priv);
- gen2_irq_reset(uncore, VLV_IRQ_REGS);
+ intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -1507,10 +1562,11 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (I915_HAS_HOTPLUG(i915)) {
i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_uncore_rmw(&i915->uncore,
- PORT_HOTPLUG_STAT(i915), 0, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
}
i9xx_pipestat_irq_reset(i915);
@@ -1518,8 +1574,7 @@ void i9xx_display_irq_reset(struct drm_i915_private *i915)
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
+ struct intel_display *display = &dev_priv->display;
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -1547,32 +1602,32 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
if (!HAS_DISPLAY(dev_priv))
return;
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
}
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
@@ -1580,7 +1635,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
if (DISPLAY_VER(dev_priv) >= 12) {
enum transcoder trans;
@@ -1592,39 +1647,39 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (!intel_display_power_is_enabled(dev_priv, domain))
continue;
- intel_uncore_write(uncore,
- TRANS_PSR_IMR(dev_priv, trans),
- 0xffffffff);
- intel_uncore_write(uncore,
- TRANS_PSR_IIR(dev_priv, trans),
- 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IMR(dev_priv, trans),
+ 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IIR(dev_priv, trans),
+ 0xffffffff);
}
} else {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
if (DISPLAY_VER(dev_priv) >= 14)
- gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
- gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
+ intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
@@ -1637,9 +1692,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1647,7 +1702,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1658,7 +1713,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1679,7 +1734,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
*/
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -1692,7 +1747,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
else
mask = SDE_GMBUS_CPT;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -1725,7 +1780,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_display *display = &i915->display;
u32 display_mask, extra_mask;
if (DISPLAY_VER(i915) >= 7) {
@@ -1749,7 +1804,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
}
if (IS_HASWELL(i915)) {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -1760,8 +1815,8 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
ibx_irq_postinstall(i915);
- gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
- display_mask | extra_mask);
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
+ display_mask | extra_mask);
}
static void mtp_irq_postinstall(struct drm_i915_private *i915);
@@ -1770,7 +1825,6 @@ static void icp_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
GEN8_PIPE_CDCLK_CRC_DONE;
@@ -1836,11 +1890,11 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (!intel_display_power_is_enabled(dev_priv, domain))
continue;
- gen2_assert_iir_is_zero(uncore,
- TRANS_PSR_IIR(dev_priv, trans));
+ intel_display_irq_regs_assert_irr_is_zero(display,
+ TRANS_PSR_IIR(dev_priv, trans));
}
} else {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
for_each_pipe(dev_priv, pipe) {
@@ -1848,65 +1902,69 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- de_pipe_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ de_pipe_enables);
}
- gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
- gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
+ intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
+ de_port_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
+ de_misc_masked);
if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
+ de_hpd_enables);
}
}
static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
XELPDP_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
+ de_hpd_enables);
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
if (!HAS_DISPLAY(dev_priv))
return;
gen8_de_irq_postinstall(dev_priv);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
gen8_de_irq_postinstall(i915);
- intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
void intel_display_irq_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index f92e4640a613..c4f1ab43fc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -130,7 +130,7 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "(-1=use per-chip default, 0=disabled, 1=enabled, 2=match any register, 3=always locked) "
"Default: -1");
__maybe_unused
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d3b8453a1705..14ae60749f02 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -842,7 +842,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
* block right away if this is the last reference.
*
* This function is only for the power domain code's internal use to suppress wakeref
- * tracking when the correspondig debug kconfig option is disabled, should not
+ * tracking when the corresponding debug kconfig option is disabled, should not
* be used otherwise.
*/
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
@@ -1126,9 +1126,6 @@ static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (display->platform.alderlake_p)
- return;
-
for_each_dbuf_slice(display, slice)
intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
@@ -1681,7 +1678,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(display);
- if (DISPLAY_VER(display) >= 12)
+ if (DISPLAY_VER(display) == 12 || display->platform.dg2)
gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
@@ -1736,7 +1733,7 @@ static void icl_display_core_uninit(struct intel_display *display)
gen9_disable_dc_states(display);
intel_dmc_disable_program(display);
- /* 1. Disable all display engine functions -> aready done */
+ /* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
gen9_dbuf_disable(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 338379dae44c..ec8e508d0593 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -60,7 +60,7 @@ struct i915_power_well_instance {
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
- * Arbitraty data associated with this power well. Platform and power
+ * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
@@ -77,7 +77,7 @@ struct i915_power_well_instance {
struct {
/*
* request/status flag index in the power well
- * constrol/status registers.
+ * control/status registers.
*/
u8 idx;
} hsw;
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 338b9f7b20b8..27ebc32cb61a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -4,7 +4,11 @@
*/
#undef TRACE_SYSTEM
+#ifdef I915
#define TRACE_SYSTEM i915
+#else
+#define TRACE_SYSTEM xe
+#endif
#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __INTEL_DISPLAY_TRACE_H__
@@ -21,6 +25,7 @@
#include "intel_vblank.h"
#define __dev_name_display(display) dev_name((display)->drm->dev)
+#define __dev_name_drm(obj) dev_name((obj)->dev->dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
/*
@@ -397,23 +402,24 @@ TRACE_EVENT(intel_plane_async_flip,
__entry->async_flip = async_flip;
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, async_flip=%s",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline, str_yes_no(__entry->async_flip))
);
TRACE_EVENT(intel_plane_update_noarm,
- TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
+ TP_PROTO(const struct intel_plane_state *plane_state, struct intel_crtc *crtc),
+ TP_ARGS(plane_state, crtc),
TP_STRUCT__entry(
- __string(dev, __dev_name_kms(plane))
+ __string(dev, __dev_name_drm(plane_state->uapi.plane))
__field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
+ __field(u32, format)
__array(int, src, 4)
__array(int, dst, 4)
- __string(name, plane->base.name)
+ __string(name, plane_state->uapi.plane->name)
),
TP_fast_assign(
@@ -422,29 +428,31 @@ TRACE_EVENT(intel_plane_update_noarm,
__entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
- memcpy(__entry->dst, &plane->base.state->dst, sizeof(__entry->dst));
+ __entry->format = plane_state->hw.fb->format->format;
+ memcpy(__entry->src, &plane_state->uapi.src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane_state->uapi.dst, sizeof(__entry->dst));
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, format=%p4cc, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
__get_str(dev), __entry->pipe_name, __get_str(name),
- __entry->frame, __entry->scanline,
+ __entry->frame, __entry->scanline, &__entry->format,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
TRACE_EVENT(intel_plane_update_arm,
- TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
+ TP_PROTO(const struct intel_plane_state *plane_state, struct intel_crtc *crtc),
+ TP_ARGS(plane_state, crtc),
TP_STRUCT__entry(
- __string(dev, __dev_name_kms(plane))
+ __string(dev, __dev_name_drm(plane_state->uapi.plane))
__field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
+ __field(u32, format)
__array(int, src, 4)
__array(int, dst, 4)
- __string(name, plane->base.name)
+ __string(name, plane_state->uapi.plane->name)
),
TP_fast_assign(
@@ -453,13 +461,14 @@ TRACE_EVENT(intel_plane_update_arm,
__entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
- memcpy(__entry->dst, &plane->base.state->dst, sizeof(__entry->dst));
+ __entry->format = plane_state->hw.fb->format->format;
+ memcpy(__entry->src, &plane_state->uapi.src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane_state->uapi.dst, sizeof(__entry->dst));
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, format=%p4cc, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
__get_str(dev), __entry->pipe_name, __get_str(name),
- __entry->frame, __entry->scanline,
+ __entry->frame, __entry->scanline, &__entry->format,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
@@ -484,11 +493,110 @@ TRACE_EVENT(intel_plane_disable_arm,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
+TRACE_EVENT(intel_plane_scaler_update_arm,
+ TP_PROTO(struct intel_plane *plane,
+ int scaler_id, int x, int y, int w, int h),
+ TP_ARGS(plane, scaler_id, x, y, w, h),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(plane))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(int, x)
+ __field(int, y)
+ __field(int, w)
+ __field(int, h)
+ __string(name, plane->base.name)
+ ),
+
+ TP_fast_assign(
+ struct intel_display *display = to_intel_display(plane);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe);
+ __assign_str(dev);
+ __assign_str(name);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->x = x;
+ __entry->y = y;
+ __entry->w = w;
+ __entry->h = h;
+ ),
+
+ TP_printk("dev %s, pipe %c, scaler %d, plane %s, frame=%u, scanline=%u, " DRM_RECT_FMT,
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __get_str(name), __entry->frame, __entry->scanline,
+ __entry->w, __entry->h, __entry->x, __entry->y)
+);
+
+TRACE_EVENT(intel_pipe_scaler_update_arm,
+ TP_PROTO(struct intel_crtc *crtc, int scaler_id,
+ int x, int y, int w, int h),
+ TP_ARGS(crtc, scaler_id, x, y, w, h),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(crtc))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(int, x)
+ __field(int, y)
+ __field(int, w)
+ __field(int, h)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->x = x;
+ __entry->y = y;
+ __entry->w = w;
+ __entry->h = h;
+ ),
+
+	TP_printk("dev %s, pipe %c, scaler %d, frame=%u, scanline=%u, " DRM_RECT_FMT,
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __entry->frame, __entry->scanline,
+ __entry->w, __entry->h, __entry->x, __entry->y)
+);
+
+TRACE_EVENT(intel_scaler_disable_arm,
+ TP_PROTO(struct intel_crtc *crtc, int scaler_id),
+ TP_ARGS(crtc, scaler_id),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(crtc))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("dev %s, pipe %c, scaler %d, frame=%u, scanline=%u",
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __entry->frame, __entry->scanline)
+);
+
TRACE_EVENT(intel_fbc_activate,
TP_PROTO(struct intel_plane *plane),
TP_ARGS(plane),
@@ -512,7 +620,7 @@ TRACE_EVENT(intel_fbc_activate,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -540,7 +648,7 @@ TRACE_EVENT(intel_fbc_deactivate,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -568,7 +676,7 @@ TRACE_EVENT(intel_fbc_nuke,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 8271e50e3644..f1a9ab4ae85a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -711,6 +711,8 @@ struct intel_initial_plane_config {
struct intel_scaler {
u32 mode;
bool in_use;
+ int hscale;
+ int vscale;
};
struct intel_crtc_scaler_state {
@@ -732,7 +734,7 @@ struct intel_crtc_scaler_state {
*
* intel_atomic_setup_scalers will setup available scalers to users
* requesting scalers. It will gracefully fail if request exceeds
- * avilability.
+ * availability.
*/
#define SKL_CRTC_INDEX 31
unsigned scaler_users;
@@ -1095,6 +1097,7 @@ struct intel_crtc_state {
int max_link_bpp_x16; /* in 1/16 bpp units */
int pipe_bpp; /* in 1 bpp units */
+ int min_hblank;
struct intel_link_m_n dp_m_n;
/* m2_n2 for eDP downclock */
@@ -1113,7 +1116,7 @@ struct intel_crtc_state {
u16 su_y_granularity;
/*
- * Frequence the dpll for the port should run at. Differs from the
+ * Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
@@ -1474,6 +1477,7 @@ struct intel_plane {
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+ bool (*can_async_flip)(u64 modifier);
/* Write all non-self arming plane registers */
void (*update_noarm)(struct intel_dsb *dsb,
struct intel_plane *plane,
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 02de3ae15074..7e2ce0c2f6c3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -10,7 +10,6 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
-#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -50,12 +49,24 @@
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
+/*
+ * Possible non-negative values for the enable_dmc_wl param.
+ */
+enum {
+ ENABLE_DMC_WL_DISABLED,
+ ENABLE_DMC_WL_ENABLED,
+ ENABLE_DMC_WL_ANY_REGISTER,
+ ENABLE_DMC_WL_ALWAYS_LOCKED,
+ ENABLE_DMC_WL_MAX,
+};
+
struct intel_dmc_wl_range {
u32 start;
u32 end;
};
static const struct intel_dmc_wl_range powered_off_ranges[] = {
+ { .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
{ .start = 0x60000, .end = 0x7ffff },
{},
};
@@ -90,6 +101,7 @@ static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
{ .start = 0x46160 }, /* CMTG_CLK_SEL */
{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+ { .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */
{},
};
@@ -230,10 +242,15 @@ static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
return false;
}
-static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
+static bool intel_dmc_wl_check_range(struct intel_display *display,
+ i915_reg_t reg,
+ u32 dc_state)
{
const struct intel_dmc_wl_range *ranges;
+ if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
+ return true;
+
/*
* Check that the offset is in one of the ranges for which
* registers are powered off during DC states.
@@ -265,20 +282,48 @@ static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
- return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
+ return display->params.enable_dmc_wl;
}
static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
- if (!HAS_DMC_WAKELOCK(display))
- display->params.enable_dmc_wl = 0;
- else if (display->params.enable_dmc_wl >= 0)
- display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
- else
- display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;
-
- drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
- display->params.enable_dmc_wl);
+ const char *desc;
+
+ if (!HAS_DMC_WAKELOCK(display)) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl < 0) {
+ if (DISPLAY_VER(display) >= 30)
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ else
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ }
+
+ drm_WARN_ON(display->drm,
+ display->params.enable_dmc_wl < 0 ||
+ display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);
+
+ switch (display->params.enable_dmc_wl) {
+ case ENABLE_DMC_WL_DISABLED:
+ desc = "disabled";
+ break;
+ case ENABLE_DMC_WL_ENABLED:
+ desc = "enabled";
+ break;
+ case ENABLE_DMC_WL_ANY_REGISTER:
+ desc = "match any register";
+ break;
+ case ENABLE_DMC_WL_ALWAYS_LOCKED:
+ desc = "always locked";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
+ display->params.enable_dmc_wl, desc);
}
void intel_dmc_wl_init(struct intel_display *display)
@@ -292,7 +337,8 @@ void intel_dmc_wl_init(struct intel_display *display)
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
spin_lock_init(&wl->lock);
- refcount_set(&wl->refcount, 0);
+ refcount_set(&wl->refcount,
+ display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}
/* Must only be called as part of enabling dynamic DC states. */
@@ -398,7 +444,8 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
spin_lock_irqsave(&wl->lock, flags);
- if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
goto out_unlock;
if (!wl->enabled) {
@@ -430,7 +477,8 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
spin_lock_irqsave(&wl->lock, flags);
- if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f1f3b1bb1e89..09211ae38d96 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1075,7 +1075,7 @@ static bool source_can_output(struct intel_dp *intel_dp,
/*
* No YCbCr output support on gmch platforms.
* Also, ILK doesn't seem capable of DP YCbCr output.
- * The displayed image is severly corrupted. SNB+ is fine.
+ * The displayed image is severely corrupted. SNB+ is fine.
*/
return !HAS_GMCH(display) && !display->platform.ironlake;
@@ -1791,7 +1791,7 @@ int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
@@ -1926,7 +1926,7 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(int dsc_bpp_x16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
@@ -1934,15 +1934,16 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_cloc
u32 available_bw, required_bw;
available_bw = (link_clock * lane_count * timeslots * 16) / 8;
- required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
+ required_bw = dsc_bpp_x16 * (intel_dp_mode_to_fec_clock(mode_clock));
return available_bw > required_bw;
}
static int dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- u16 compressed_bppx16,
+ struct drm_connector_state *conn_state,
+ const struct link_config_limits *limits,
+ int dsc_bpp_x16,
int timeslots)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1957,15 +1958,37 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
- lane_count, adjusted_mode->clock,
- pipe_config->output_format,
- timeslots))
- continue;
+ /*
+ * FIXME: intel_dp_mtp_tu_compute_config() requires
+			 * ->lane_count and ->port_clock to be set before we know
+			 * whether they'll work. If we end up failing altogether,
+ * they'll remain in crtc state. This shouldn't matter,
+ * as we'd then bail out from compute config, but it's
+ * just ugly.
+ */
pipe_config->lane_count = lane_count;
pipe_config->port_clock = link_rate;
+ if (drm_dp_is_uhbr_rate(link_rate)) {
+ int ret;
+
+ ret = intel_dp_mtp_tu_compute_config(intel_dp,
+ pipe_config,
+ conn_state,
+ dsc_bpp_x16,
+ dsc_bpp_x16,
+ 0, true);
+ if (ret)
+ continue;
+ } else {
+ if (!is_bw_sufficient_for_dsc_config(dsc_bpp_x16, link_rate,
+ lane_count, adjusted_mode->clock,
+ pipe_config->output_format,
+ timeslots))
+ continue;
+ }
+
return 0;
}
}
@@ -2055,112 +2078,66 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
}
/*
- * From a list of valid compressed bpps try different compressed bpp and find a
- * suitable link configuration that can support it.
+ * Note: for pre-13 display the caller still needs to check the validity of each bpp step.
*/
-static int
-icl_dsc_compute_link_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- int dsc_max_bpp,
- int dsc_min_bpp,
- int pipe_bpp,
- int timeslots)
+static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
- int i, ret;
-
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
-
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
- continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
+ struct intel_display *display = to_intel_display(connector);
+ u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- ret = dsc_compute_link_config(intel_dp,
- pipe_config,
- limits,
- valid_dsc_bpp[i] << 4,
- timeslots);
- if (ret == 0) {
- pipe_config->dsc.compressed_bpp_x16 =
- fxp_q4_from_int(valid_dsc_bpp[i]);
- return 0;
- }
- }
+ if (DISPLAY_VER(display) < 14 || !incr)
+ return fxp_q4_from_int(1);
- return -EINVAL;
+ /* fxp q4 */
+ return fxp_q4_from_int(1) / incr;
}
-/*
- * From XE_LPD onwards we supports compression bpps in steps of 1 up to
- * uncompressed bpp-1. So we start from max compressed bpp and see if any
- * link configuration is able to support that compressed bpp, if not we
- * step down and check for lower compressed bpp.
- */
-static int
-xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- int dsc_max_bpp,
- int dsc_min_bpp,
- int pipe_bpp,
- int timeslots)
+/* Note: This is not universally usable! */
+static bool intel_dp_dsc_valid_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
struct intel_display *display = to_intel_display(intel_dp);
- u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- u16 compressed_bppx16;
- u8 bppx16_step;
- int ret;
+ int i;
- if (DISPLAY_VER(display) < 14 || bppx16_incr <= 1)
- bppx16_step = 16;
- else
- bppx16_step = 16 / bppx16_incr;
+ if (DISPLAY_VER(display) >= 13) {
+ if (intel_dp->force_dsc_fractional_bpp_en && !fxp_q4_to_frac(bpp_x16))
+ return false;
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
- dsc_min_bpp = dsc_min_bpp << 4;
-
- for (compressed_bppx16 = dsc_max_bpp;
- compressed_bppx16 >= dsc_min_bpp;
- compressed_bppx16 -= bppx16_step) {
- if (intel_dp->force_dsc_fractional_bpp_en &&
- !fxp_q4_to_frac(compressed_bppx16))
- continue;
- ret = dsc_compute_link_config(intel_dp,
- pipe_config,
- limits,
- compressed_bppx16,
- timeslots);
- if (ret == 0) {
- pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
- if (intel_dp->force_dsc_fractional_bpp_en &&
- fxp_q4_to_frac(compressed_bppx16))
- drm_dbg_kms(display->drm,
- "Forcing DSC fractional bpp\n");
+ return true;
+ }
- return 0;
- }
+ if (fxp_q4_to_frac(bpp_x16))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
+ if (fxp_q4_to_int(bpp_x16) == valid_dsc_bpp[i])
+ return true;
}
- return -EINVAL;
+
+ return false;
}
+/*
+ * Determine the max compressed BPP for which a link configuration can be found. The BPPs to
+ * try depend on the source (platform) and sink.
+ */
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ const struct link_config_limits *limits,
int pipe_bpp,
int timeslots)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int output_bpp;
int dsc_min_bpp;
int dsc_max_bpp;
+ int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
+ int bpp_x16;
+ int ret;
dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
@@ -2169,11 +2146,38 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
num_joined_pipes);
dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
- if (DISPLAY_VER(display) >= 13)
- return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
- dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
- return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
- dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
+ /* FIXME: remove the round trip via integers */
+ min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
+ max_bpp_x16 = fxp_q4_from_int(dsc_max_bpp);
+
+ bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
+
+ /* Compressed BPP should be less than the Input DSC bpp */
+ output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
+ max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
+
+ for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
+ if (!intel_dp_dsc_valid_bpp(intel_dp, bpp_x16))
+ continue;
+
+ ret = dsc_compute_link_config(intel_dp,
+ pipe_config,
+ conn_state,
+ limits,
+ bpp_x16,
+ timeslots);
+ if (ret == 0) {
+ pipe_config->dsc.compressed_bpp_x16 = bpp_x16;
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ fxp_q4_to_frac(bpp_x16))
+ drm_dbg_kms(display->drm,
+ "Forcing DSC fractional bpp\n");
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
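A standalone sketch (not kernel code) showing how the search bounds above end up in Q4.4: the upper bound is capped one step below the uncompressed output bpp, so the loop never tries a compressed bpp at or above the input bpp. All numbers are illustrative.

#include <stdio.h>

#define FXP_Q4(x)	((x) << 4)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int dsc_min_bpp = 8, dsc_max_bpp = 16;	/* from link/sink limits */
	int output_bpp = 24;			/* uncompressed output bpp */
	int bpp_step_x16 = 1;			/* 1/16 bpp steps */
	int min_x16 = FXP_Q4(dsc_min_bpp);
	int max_x16 = MIN(FXP_Q4(dsc_max_bpp), FXP_Q4(output_bpp) - bpp_step_x16);

	printf("search from %d down to %d (x16 units), step %d\n",
	       max_x16, min_x16, bpp_step_x16);
	return 0;
}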
int intel_dp_dsc_min_src_input_bpc(void)
@@ -2183,7 +2187,7 @@ int intel_dp_dsc_min_src_input_bpc(void)
}
static
-bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
+bool is_dsc_pipe_bpp_sufficient(const struct link_config_limits *limits,
int pipe_bpp)
{
return pipe_bpp >= limits->pipe.min_bpp &&
@@ -2192,7 +2196,7 @@ bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
int forced_bpp;
@@ -2218,13 +2222,11 @@ int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
+ const struct link_config_limits *limits,
int timeslots)
{
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- int dsc_max_bpp;
- int dsc_min_bpp;
u8 dsc_bpc[3] = {};
int forced_bpp, pipe_bpp;
int num_bpc, i, ret;
@@ -2232,7 +2234,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
- ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
+ ret = dsc_compute_compressed_bpp(intel_dp, pipe_config, conn_state,
limits, forced_bpp, timeslots);
if (ret == 0) {
pipe_config->pipe_bpp = forced_bpp;
@@ -2240,9 +2242,6 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpp = limits->pipe.max_bpp;
- dsc_min_bpp = limits->pipe.min_bpp;
-
/*
* Get the maximum DSC bpc that will be supported by any valid
* link configuration and compressed bpp.
@@ -2250,11 +2249,10 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
for (i = 0; i < num_bpc; i++) {
pipe_bpp = dsc_bpc[i] * 3;
- if (pipe_bpp < dsc_min_bpp)
- break;
- if (pipe_bpp > dsc_max_bpp)
+ if (pipe_bpp < limits->pipe.min_bpp || pipe_bpp > limits->pipe.max_bpp)
continue;
- ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
+
+ ret = dsc_compute_compressed_bpp(intel_dp, pipe_config, conn_state,
limits, pipe_bpp, timeslots);
if (ret == 0) {
pipe_config->pipe_bpp = pipe_bpp;
@@ -2268,7 +2266,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
@@ -2333,9 +2331,8 @@ static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
- int timeslots,
- bool compute_pipe_bpp)
+ const struct link_config_limits *limits,
+ int timeslots)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector =
@@ -2343,6 +2340,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
+ bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
int ret;
intel_dp_fec_compute_config(intel_dp, pipe_config);
@@ -2351,12 +2349,10 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return -EINVAL;
/*
- * compute pipe bpp is set to false for DP MST DSC case
- * and compressed_bpp is calculated same time once
- * vpci timeslots are allocated, because overall bpp
- * calculation procedure is bit different for MST case.
+ * Link parameters, pipe bpp and compressed bpp have already been
+ * figured out for DP MST DSC.
*/
- if (compute_pipe_bpp) {
+ if (!is_mst) {
if (intel_dp_is_edp(intel_dp))
ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
conn_state, limits);
@@ -2519,9 +2515,6 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
- /* FIXME 128b/132b SST+DSC support missing */
- if (!is_mst && dsc)
- limits->max_rate = min(limits->max_rate, 810000);
limits->min_rate = min(limits->min_rate, limits->max_rate);
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
@@ -2641,9 +2634,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (!ret && intel_dp_is_uhbr(pipe_config))
ret = intel_dp_mtp_tu_compute_config(intel_dp,
pipe_config,
- pipe_config->pipe_bpp,
- pipe_config->pipe_bpp,
conn_state,
+ fxp_q4_from_int(pipe_config->pipe_bpp),
+ fxp_q4_from_int(pipe_config->pipe_bpp),
0, false);
if (ret)
dsc_needed = true;
@@ -2667,7 +2660,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return -EINVAL;
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
- conn_state, &limits, 64, true);
+ conn_state, &limits, 64);
if (ret < 0)
return ret;
}
@@ -2824,24 +2817,22 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported)
+ if (!intel_vrr_possible(crtc_state) || !intel_dp->as_sdp_supported)
return;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
+ as_sdp->vtotal = intel_vrr_vmin_vtotal(crtc_state);
if (crtc_state->cmrr.enable) {
as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED;
- as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
- as_sdp->vtotal = adjusted_mode->vtotal;
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->target_rr = 0;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index ca49f0a05da5..9189db4c2594 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -75,9 +75,8 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
- int timeslots,
- bool recompute_pipe_bpp);
+ const struct link_config_limits *limits,
+ int timeslots);
void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 8b1977cfec50..9cb22baafeeb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -783,7 +783,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
/*
* WaEdpLinkRateDataReload
*
- * Parade PS8461E MUX (used on varius TGL+ laptops) needs
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
* to snoop the link rates reported by the sink when we
* use LINK_RATE_SET in order to operate in jitter cleaning
* mode (as opposed to redriver mode). Unfortunately it
@@ -1629,7 +1629,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
/*
* Ignore the link failure in CI
*
- * In fixed enviroments like CI, sometimes unexpected long HPDs are
+ * In fixed environments like CI, sometimes unexpected long HPDs are
* generated by the displays. If ignore_long_hpd flag is set, such long
* HPDs are ignored. And probably as a consequence of these ignored
* long HPDs, subsequent link trainings are failed resulting into CI
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c44fc7dd86c..cc6e4ca37519 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -111,7 +111,7 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
&crtc_state->hw.adjusted_mode;
if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
- return INT_MAX;
+ return 0;
/*
* DSC->DPT interface width:
@@ -209,22 +209,58 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
+static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ struct intel_connector *connector,
+ int bpp_x16)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ int hblank;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hblank = DIV_ROUND_UP((DIV_ROUND_UP
+ (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
+ symbol_size);
+
+ crtc_state->min_hblank = hblank;
+}
+
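A standalone sketch (not kernel code) that evaluates the same min-hblank symbol cycle arithmetic for one example mode; the timings and bpp are assumptions picked only to show the rounding, and the value actually programmed is clamped later in mst_stream_enable().

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int htotal = 4000, hdisplay = 3840;	/* example timings */
	int link_bpp_x16 = 12 << 4;		/* 12 bpp in Q4.4 */
	int symbol_size = 32;			/* 128b/132b; 8b/10b uses 8 */
	int hblank;

	hblank = DIV_ROUND_UP(DIV_ROUND_UP(htotal - hdisplay, 4) * link_bpp_x16,
			      symbol_size);

	printf("min hblank link layer symbol cycles: %d\n", hblank);
	return 0;
}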
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
struct drm_connector_state *conn_state,
- int step, bool dsc)
+ int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc)
{
struct intel_display *display = to_intel_display(intel_dp);
struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_dp_mst_topology_state *mst_state = NULL;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- fixed20_12 pbn_div;
- int bpp, slots = -EINVAL;
+ bool is_mst = intel_dp->is_mst;
+ int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
- int max_dpt_bpp;
+ int max_dpt_bpp_x16;
+
+ /* shouldn't happen, sanity check */
+ drm_WARN_ON(display->drm, !dsc && (fxp_q4_to_frac(min_bpp_x16) ||
+ fxp_q4_to_frac(max_bpp_x16) ||
+ fxp_q4_to_frac(bpp_step_x16)));
+
+ if (is_mst) {
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
+ }
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
@@ -233,18 +269,15 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
- pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
- crtc_state->lane_count);
-
- max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
- if (max_bpp > max_dpt_bpp) {
- drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
- max_bpp, max_dpt_bpp);
- max_bpp = max_dpt_bpp;
+ max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
+ if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
+ drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (" FXP_Q4_FMT " -> " FXP_Q4_FMT ")\n",
+ FXP_Q4_ARGS(max_bpp_x16), FXP_Q4_ARGS(max_dpt_bpp_x16));
+ max_bpp_x16 = max_dpt_bpp_x16;
}
- drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
- min_bpp, max_bpp);
+ drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
+ FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
@@ -255,23 +288,27 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
}
- for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
int local_bw_overhead;
int link_bpp_x16;
- drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);
+ drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
- link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
- intel_dp_output_bpp(crtc_state->output_format, bpp));
+ link_bpp_x16 = dsc ? bpp_x16 :
+ fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ fxp_q4_to_int(bpp_x16)));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
+
+ intel_dp_mst_compute_min_hblank(crtc_state, connector, link_bpp_x16);
+
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
- if (intel_dp->is_mst) {
+ if (is_mst) {
int remote_bw_overhead;
int remote_tu;
fixed20_12 pbn;
@@ -296,7 +333,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
link_bpp_x16,
remote_bw_overhead));
- remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);
+ remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
/*
* Aligning the TUs ensures that symbols consisting of multiple
@@ -314,7 +351,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
* allocated for the whole path and the TUs allocated for the
* first branch device's link also applies here.
*/
- pbn.full = remote_tu * pbn_div.full;
+ pbn.full = remote_tu * mst_state->pbn_div.full;
drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
crtc_state->dp_m_n.tu = remote_tu;
@@ -341,6 +378,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
+
+ /* Allow using zero step to indicate one try */
+ if (!bpp_step_x16)
+ break;
}
if (slots < 0) {
@@ -350,65 +391,42 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
if (!dsc)
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = fxp_q4_to_int(bpp_x16);
else
- crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
+ crtc_state->dsc.compressed_bpp_x16 = bpp_x16;
- drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
- slots, bpp, dsc);
+ drm_dbg_kms(display->drm, "Got %d slots for pipe bpp " FXP_Q4_FMT " dsc %d\n",
+ slots, FXP_Q4_ARGS(bpp_x16), dsc);
return 0;
}
-static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
- struct link_config_limits *limits,
- struct drm_connector_state *conn_state,
- int step, bool dsc)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_dp_mst_topology_state *mst_state;
-
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
- if (IS_ERR(mst_state))
- return PTR_ERR(mst_state);
-
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
-
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
- crtc_state->lane_count);
-
- return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
- max_bpp, min_bpp,
- conn_state, step, dsc);
-}
-
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
- return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
- fxp_q4_to_int(limits->link.max_bpp_x16),
- fxp_q4_to_int(limits->link.min_bpp_x16),
- limits,
- conn_state, 2 * 3, false);
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
+ limits->link.min_bpp_x16,
+ limits->link.max_bpp_x16,
+ fxp_q4_from_int(2 * 3), false);
}
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- int i, num_bpc;
+ int num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
int min_compressed_bpp, max_compressed_bpp;
@@ -422,15 +440,8 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
- sink_max_bpp = dsc_bpc[0] * 3;
- sink_min_bpp = sink_max_bpp;
-
- for (i = 1; i < num_bpc; i++) {
- if (sink_min_bpp > dsc_bpc[i] * 3)
- sink_min_bpp = dsc_bpc[i] * 3;
- if (sink_max_bpp < dsc_bpc[i] * 3)
- sink_max_bpp = dsc_bpc[i] * 3;
- }
+ sink_min_bpp = min_array(dsc_bpc, num_bpc) * 3;
+ sink_max_bpp = max_array(dsc_bpc, num_bpc) * 3;
drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
@@ -455,9 +466,13 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
crtc_state->pipe_bpp);
- return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
- min_compressed_bpp, limits,
- conn_state, 1, true);
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
+ fxp_q4_from_int(min_compressed_bpp),
+ fxp_q4_from_int(max_compressed_bpp),
+ fxp_q4_from_int(1), true);
}
static int mst_stream_update_slots(struct intel_dp *intel_dp,
@@ -679,7 +694,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
- pipe_config->dp_m_n.tu, false);
+ pipe_config->dp_m_n.tu);
}
if (ret)
@@ -837,7 +852,7 @@ static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
@@ -943,33 +958,32 @@ mst_connector_atomic_topology_check(struct intel_connector *connector,
}
static int
-mst_connector_atomic_check(struct drm_connector *connector,
+mst_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_digital_connector_atomic_check(connector, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
- ret = mst_connector_atomic_topology_check(intel_connector, state);
+ ret = mst_connector_atomic_topology_check(connector, state);
if (ret)
return ret;
- if (intel_connector_needs_modeset(state, connector)) {
+ if (intel_connector_needs_modeset(state, &connector->base)) {
ret = intel_dp_tunnel_atomic_check_state(state,
- intel_connector->mst_port,
- intel_connector);
+ connector->mst_port,
+ connector);
if (ret)
return ret;
}
return drm_dp_atomic_release_time_slots(&state->base,
- &intel_connector->mst_port->mst_mgr,
- intel_connector->port);
+ &connector->mst_port->mst_mgr,
+ connector->port);
}
static void mst_stream_disable(struct intel_atomic_state *state,
@@ -982,6 +996,7 @@ static void mst_stream_disable(struct intel_atomic_state *state,
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ enum transcoder trans = old_crtc_state->cpu_transcoder;
drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
@@ -992,6 +1007,8 @@ static void mst_stream_disable(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
+
+ intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0x00);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1220,11 +1237,10 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
- if (!IS_ALDERLAKE_P(i915))
+ if (!display->platform.alderlake_p)
return;
if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
@@ -1265,7 +1281,7 @@ static void mst_stream_enable(struct intel_atomic_state *state,
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
struct intel_crtc *pipe_crtc;
- int ret, i;
+ int ret, i, min_hblank;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1280,6 +1296,29 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
+ if (DISPLAY_VER(display) >= 20) {
+ /*
+ * adjust the BlankingStart/BlankingEnd framing control from
+ * the calculated value
+ */
+ min_hblank = pipe_config->min_hblank - 2;
+
+ /* Maximum value to be programmed is limited to 0x10 */
+ min_hblank = min(0x10, min_hblank);
+
+ /*
+ * The minimum accepted hblank is 5 symbol cycles for 128b/132b
+ * and 3 symbol cycles for 8b/10b
+ */
+ if (intel_dp_is_uhbr(pipe_config))
+ min_hblank = max(min_hblank, 5);
+ else
+ min_hblank = max(min_hblank, 3);
+
+ intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
+ min_hblank);
+ }
+
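A standalone sketch (not kernel code) of the clamping applied above before DP_MIN_HBLANK_CTL is programmed: the calculated value is adjusted by 2, capped at 0x10 and floored at 5 (128b/132b) or 3 (8b/10b) symbol cycles.

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

static int clamp_min_hblank(int calculated, int is_uhbr)
{
	int min_hblank = calculated - 2;	/* BlankingStart/End adjustment */

	min_hblank = MIN(0x10, min_hblank);	/* maximum programmable value */
	min_hblank = MAX(min_hblank, is_uhbr ? 5 : 3);

	return min_hblank;
}

int main(void)
{
	printf("calculated 240, 128b/132b: %d\n", clamp_min_hblank(240, 1));	/* 16 */
	printf("calculated 4, 8b/10b:      %d\n", clamp_min_hblank(4, 0));	/* 3 */
	return 0;
}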
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
@@ -1349,23 +1388,23 @@ static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}
-static int mst_connector_get_ddc_modes(struct drm_connector *connector)
+static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
- if (drm_connector_is_unregistered(connector))
- return intel_connector_update_modes(connector, NULL);
+ if (drm_connector_is_unregistered(&connector->base))
+ return intel_connector_update_modes(&connector->base, NULL);
if (!intel_display_driver_check_access(display))
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
- drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
+ drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst_mgr, connector->port);
- ret = intel_connector_update_modes(connector, drm_edid);
+ ret = intel_connector_update_modes(&connector->base, drm_edid);
drm_edid_free(drm_edid);
@@ -1373,32 +1412,29 @@ static int mst_connector_get_ddc_modes(struct drm_connector *connector)
}
static int
-mst_connector_late_register(struct drm_connector *connector)
+mst_connector_late_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = drm_dp_mst_connector_late_register(connector,
- intel_connector->port);
+ ret = drm_dp_mst_connector_late_register(&connector->base, connector->port);
if (ret < 0)
return ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret < 0)
- drm_dp_mst_connector_early_unregister(connector,
- intel_connector->port);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->port);
return ret;
}
static void
-mst_connector_early_unregister(struct drm_connector *connector)
+mst_connector_early_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_connector_unregister(connector);
- drm_dp_mst_connector_early_unregister(connector,
- intel_connector->port);
+ intel_connector_unregister(&connector->base);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->port);
}
static const struct drm_connector_funcs mst_connector_funcs = {
@@ -1412,23 +1448,25 @@ static const struct drm_connector_funcs mst_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int mst_connector_get_modes(struct drm_connector *connector)
+static int mst_connector_get_modes(struct drm_connector *_connector)
{
- return mst_connector_get_ddc_modes(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+
+ return mst_connector_get_ddc_modes(&connector->base);
}
static int
-mst_connector_mode_valid_ctx(struct drm_connector *connector,
+mst_connector_mode_valid_ctx(struct drm_connector *_connector,
struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_dp *intel_dp = connector->mst_port;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
- struct drm_dp_mst_port *port = intel_connector->port;
+ struct drm_dp_mst_port *port = connector->port;
const int min_bpp = 18;
int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1439,12 +1477,12 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
int target_clock = mode->clock;
int num_joined_pipes;
- if (drm_connector_is_unregistered(connector)) {
+ if (drm_connector_is_unregistered(&connector->base)) {
*status = MODE_ERROR;
return 0;
}
- *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ *status = intel_cpu_transcoder_mode_valid(i915, mode);
if (*status != MODE_OK)
return 0;
@@ -1477,7 +1515,7 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector,
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
@@ -1491,14 +1529,14 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- if (intel_dp_has_dsc(intel_connector)) {
+ if (intel_dp_has_dsc(connector)) {
/*
* TBD pass the connector BPC,
* for now U8_MAX so that max BPC on that platform would be picked
*/
- int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);
+ int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
- if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
+ if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
@@ -1509,7 +1547,7 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
INTEL_OUTPUT_FORMAT_RGB,
pipe_bpp, 64);
dsc_slice_count =
- intel_dp_dsc_get_slice_count(intel_connector,
+ intel_dp_dsc_get_slice_count(connector,
target_clock,
mode->hdisplay,
num_joined_pipes);
@@ -1528,44 +1566,44 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
+ *status = intel_mode_valid_max_plane_size(i915, mode, num_joined_pipes);
return 0;
}
static struct drm_encoder *
-mst_connector_atomic_best_encoder(struct drm_connector *connector,
+mst_connector_atomic_best_encoder(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
- connector);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_connector_state *connector_state =
+ drm_atomic_get_new_connector_state(state, &connector->base);
+ struct intel_dp *intel_dp = connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
static int
-mst_connector_detect_ctx(struct drm_connector *connector,
+mst_connector_detect_ctx(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = connector->mst_port;
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (drm_connector_is_unregistered(connector))
+ if (drm_connector_is_unregistered(&connector->base))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
- intel_dp_flush_connector_commits(intel_connector);
+ intel_dp_flush_connector_commits(connector);
- return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
- intel_connector->port);
+ return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst_mgr,
+ connector->port);
}
static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
@@ -1601,29 +1639,30 @@ static bool mst_connector_get_hw_state(struct intel_connector *connector)
}
static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
const char *pathprop)
{
struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = to_intel_connector(_connector);
- drm_object_attach_property(&connector->base,
+ drm_object_attach_property(&connector->base.base,
display->drm->mode_config.path_property, 0);
- drm_object_attach_property(&connector->base,
+ drm_object_attach_property(&connector->base.base,
display->drm->mode_config.tile_property, 0);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
*/
- connector->max_bpc_property =
+ connector->base.max_bpc_property =
intel_dp->attached_connector->base.max_bpc_property;
- if (connector->max_bpc_property)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ if (connector->base.max_bpc_property)
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
- return drm_connector_set_path_property(connector, pathprop);
+ return drm_connector_set_path_property(&connector->base, pathprop);
}
static void
@@ -1696,62 +1735,60 @@ mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_connector *connector;
enum pipe pipe;
int ret;
- intel_connector = intel_connector_alloc();
- if (!intel_connector)
+ connector = intel_connector_alloc();
+ if (!connector)
return NULL;
- connector = &intel_connector->base;
-
- intel_connector->get_hw_state = mst_connector_get_hw_state;
- intel_connector->sync_state = intel_dp_connector_sync_state;
- intel_connector->mst_port = intel_dp;
- intel_connector->port = port;
+ connector->get_hw_state = mst_connector_get_hw_state;
+ connector->sync_state = intel_dp_connector_sync_state;
+ connector->mst_port = intel_dp;
+ connector->port = port;
drm_dp_mst_get_port_malloc(port);
- intel_dp_init_modeset_retry_work(intel_connector);
+ intel_dp_init_modeset_retry_work(connector);
- ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
+ ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort, NULL);
- if (ret) {
- drm_dp_mst_put_port_malloc(port);
- intel_connector_free(intel_connector);
- return NULL;
- }
+ if (ret)
+ goto err_put_port;
- intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
- intel_connector->dp.dsc_hblank_expansion_quirk =
- detect_dsc_hblank_expansion_quirk(intel_connector);
+ connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, connector);
+ connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(connector);
- drm_connector_helper_add(connector, &mst_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &mst_connector_helper_funcs);
for_each_pipe(display, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
- ret = drm_connector_attach_encoder(&intel_connector->base, enc);
+ ret = drm_connector_attach_encoder(&connector->base, enc);
if (ret)
- goto err;
+ goto err_cleanup_connector;
}
- ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
+ ret = mst_topology_add_connector_properties(intel_dp, &connector->base, pathprop);
if (ret)
- goto err;
+ goto err_cleanup_connector;
- ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ ret = intel_dp_hdcp_init(dig_port, connector);
if (ret)
drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
- connector->name, connector->base.id);
+ connector->base.name, connector->base.base.id);
+
+ return &connector->base;
- return connector;
+err_cleanup_connector:
+ drm_connector_cleanup(&connector->base);
+err_put_port:
+ drm_dp_mst_put_port_malloc(port);
+ intel_connector_free(connector);
-err:
- drm_connector_cleanup(connector);
return NULL;
}
@@ -2056,7 +2093,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
* @intel_dp: DP port object
*
* Prepare an MST link for topology probing, programming the target
- * link parameters to DPCD. This step is a requirement of the enumaration
+ * link parameters to DPCD. This step is a requirement of the enumeration
* of path resources during probing.
*/
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c6bdc1d190a4..c1bbfeb02ca9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -33,8 +33,7 @@ bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
struct drm_connector_state *conn_state,
- int step, bool dsc);
+ int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 380b359b0420..614b90d6938f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -257,7 +257,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
/*
* FIXME: Ideally pattern should come from DPCD 0x250. As
* current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
+ * now for compliance test.
*/
drm_dbg_kms(display->drm,
"Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
@@ -275,7 +275,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
/*
* FIXME: Ideally pattern should come from DPCD 0x24A. As
* current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
+ * now for compliance test.
*/
drm_dbg_kms(display->drm,
"Set HBR2 compliance Phy Test Pattern\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 589872babdd7..280f302967e3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -647,7 +647,7 @@ void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index e9314cf25a19..7f0f720e8dca 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -20,7 +20,8 @@ struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
-#if IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
+#if (IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)) || \
+ (IS_ENABLED(CONFIG_DRM_XE_DP_TUNNEL) && !defined(I915))
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
@@ -127,6 +128,6 @@ intel_dp_tunnel_mgr_init(struct intel_display *display)
static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {}
-#endif /* CONFIG_DRM_I915_DP_TUNNEL */
+#endif /* CONFIG_DRM_I915_DP_TUNNEL || CONFIG_DRM_XE_DP_TUNNEL */
#endif /* __INTEL_DP_TUNNEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 52a36a2281e6..5f88702818d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -40,7 +40,7 @@
* VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
+ * registers, and instead their registers must be accessed through IOSF
* sideband. VLV has one such PHY for driving ports B and C, and CHV
* adds another PHY for driving port D. Each PHY responds to specific
* IOSF-SB port.
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index d86cc9ffd4ac..b8fa04d3cd5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -4372,7 +4372,7 @@ void intel_shared_dpll_init(struct drm_i915_private *i915)
* calling intel_shared_dpll_swap_state().
*
* Returns:
- * 0 on success, negative error code on falure.
+ * 0 on success, negative error code on failure.
*/
int intel_compute_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 6af325b8e27d..3eee76874304 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -318,7 +318,7 @@ struct dpll_info {
const struct intel_shared_dpll_funcs *funcs;
/**
- * @id: unique indentifier for this DPLL
+ * @id: unique identifier for this DPLL
*/
enum intel_dpll_id id;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index e6f8fc743fb4..2f2812c23972 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -109,38 +109,26 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}
-static const struct intel_crtc_state *
-pre_commit_crtc_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int dsb_vblank_delay(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- return new_crtc_state;
+ if (pre_commit_is_vrr_active(state, crtc))
+ return intel_vrr_vblank_delay(crtc_state);
else
- return old_crtc_state;
-}
-
-static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
-{
- return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
- intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
+ return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
}
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
if (pre_commit_is_vrr_active(state, crtc))
- return crtc_state->vrr.vmax;
+ return intel_vrr_vmax_vtotal(crtc_state);
else
return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
}
@@ -148,7 +136,8 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
struct drm_i915_private *i915 = to_i915(state->base.dev);
unsigned int latency = skl_watermark_max_latency(i915, 0);
@@ -159,7 +148,8 @@ static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}
@@ -167,7 +157,8 @@ static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
static int dsb_scanline_to_hw(struct intel_atomic_state *state,
struct intel_crtc *crtc, int scanline)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
int vtotal = dsb_vtotal(state, crtc);
return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
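A standalone sketch (not kernel code) of the modular wrap done by dsb_scanline_to_hw() above: the software scanline is shifted by the crtc's scanline offset and wrapped into [0, vtotal). The offset and vtotal values are illustrative.

#include <stdio.h>

static int scanline_to_hw(int scanline, int vtotal, int scanline_offset)
{
	/* shift into hardware numbering, then wrap modulo vtotal */
	return (scanline + vtotal - scanline_offset) % vtotal;
}

int main(void)
{
	int vtotal = 1125, offset = 1;

	printf("sw 0    -> hw %d\n", scanline_to_hw(0, vtotal, offset));	/* 1124 */
	printf("sw 1124 -> hw %d\n", scanline_to_hw(1124, vtotal, offset));	/* 1123 */
	return 0;
}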
@@ -532,13 +523,15 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
/* FIXME calibrate sensibly */
int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
- int vblank_delay = dsb_vblank_delay(crtc_state);
int start, end;
if (pre_commit_is_vrr_active(state, crtc)) {
+ int vblank_delay = intel_vrr_vblank_delay(crtc_state);
+
end = intel_vrr_vmin_vblank_start(crtc_state);
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
@@ -547,6 +540,8 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
} else {
+ int vblank_delay = intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
+
end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
@@ -624,9 +619,10 @@ void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
- dsb_vblank_delay(crtc_state)) + 1;
+ dsb_vblank_delay(state, crtc)) + 1;
intel_dsb_wait_usec(dsb, usecs);
}
@@ -825,7 +821,7 @@ void intel_dsb_irq_handler(struct intel_display *display,
if (crtc->dsb_event) {
/*
- * Update vblank counter/timestmap in case it
+ * Update vblank counter/timestamp in case it
* hasn't been done yet for this frame.
*/
drm_crtc_accurate_vblank_count(&crtc->base);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index b2b78f39cfd3..7b2ffd14ae6e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -582,7 +582,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
/*
* MIPI Sequence from VBT #53 parsing logic
- * We have already separated each seqence during bios parsing
+ * We have already separated each sequence during bios parsing
* Following is generic execution function for any sequence
*/
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index 4bf476656b8c..3be1a16cac20 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -57,7 +57,7 @@ struct intel_dvo_dev_ops {
* Turn on/off output.
*
* Because none of our dvo drivers support an intermediate power levels,
- * we don't expose this in the interfac.
+ * we don't expose this in the interface.
*/
void (*dpms)(struct intel_dvo_device *dvo, bool enable);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 223c4218c019..9f7f1b9f3275 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,10 +1694,24 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], 64))
- continue;
- else
+ unsigned int end;
+
+ if (!IS_ALIGNED(fb->base.offsets[i], 64)) {
+ drm_dbg_kms(&i915->drm,
+ "fb misaligned clear color plane %d offset (0x%x)\n",
+ i, fb->base.offsets[i]);
+ return -EINVAL;
+ }
+
+ if (check_add_overflow(fb->base.offsets[i], 64, &end)) {
+ drm_dbg_kms(&i915->drm,
+ "fb bad clear color plane %d offset (0x%x)\n",
+ i, fb->base.offsets[i]);
return -EINVAL;
+ }
+
+ max_size = max(max_size, DIV_ROUND_UP(end, tile_size));
+ continue;
}
intel_fb_plane_dims(fb, i, &width, &height);
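A standalone sketch (not kernel code) of the overflow-checked end-of-plane computation used above for the clear color plane; __builtin_add_overflow stands in for the kernel's check_add_overflow(), and the offsets are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int offsets[] = { 0x1000, 0xffffffc0 };	/* illustrative */
	unsigned int end;
	int i;

	for (i = 0; i < 2; i++) {
		if (__builtin_add_overflow(offsets[i], 64u, &end))
			printf("offset 0x%x: bad clear color plane offset\n", offsets[i]);
		else
			printf("offset 0x%x: clear color plane ends at 0x%x\n", offsets[i], end);
	}
	return 0;
}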
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index d3a86f9c6bc8..dd3ac7f98dfc 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -252,7 +252,8 @@ intel_plane_fb_min_phys_alignment(const struct intel_plane_state *plane_state)
return plane->min_alignment(plane, fb, 0);
}
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+int intel_plane_pin_fb(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *old_plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct intel_framebuffer *fb =
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index ac0319b53af0..0fc6d9044638 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -23,7 +23,8 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags);
-int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 00852ff5b247..6c0808133397 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -695,3 +695,8 @@ struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
return to_intel_framebuffer(fbdev->helper.fb);
}
+
+struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
+{
+ return fbdev ? fbdev->vma : NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 08de2d5b3433..24a3434558cb 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -17,6 +17,8 @@ struct intel_framebuffer;
void intel_fbdev_setup(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
+struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
+
#else
static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
@@ -30,6 +32,12 @@ static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbd
{
return NULL;
}
+
+static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
+{
+ return NULL;
+}
+
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 37cdfa9c692a..3e8d6d8af780 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -390,7 +390,7 @@ static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs
* in @state must be recomputed
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index cda1daf4cdea..18fcdbe1248a 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -290,7 +290,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
}
/**
- * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrrun reporting state
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
* @enable: whether underruns should be reported or not
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 6ed5f726ee60..26128c610cb4 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -227,7 +227,7 @@ static void intel_frontbuffer_flush_work(struct work_struct *work)
* @front: GEM object to flush
*
* This function is targeted for our dirty callback for queueing flush when
- * dma fence is signales
+ * dma fence is signaled
*/
void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7464b44c8bb3..7cc0399b2a5d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -41,7 +41,7 @@ intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
if (DISPLAY_VER(display) >= 30) {
@@ -353,7 +353,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
/*
* Another req for hdcp key loadability is enabled state of pll for
- * cdclk. Without active crtc we wont land here. So we are assuming that
+ * cdclk. Without active crtc we won't land here. So we are assuming that
* cdclk is already on.
*/
@@ -1550,9 +1550,9 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
* with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
* (dock decides to stop advertising hdcp2 capability for some reason).
* The reason being that during suspend resume dock usually keeps the
- * HDCP2 registers inaccesible causing AUX error. This wouldn't be a
+ * HDCP2 registers inaccessible causing AUX error. This wouldn't be a
* big problem if the userspace just kept retrying with some delay while
- * it continues to play low value content but most userpace applications
+ * it continues to play low value content but most userspace applications
* end up throwing an error when it receives one from KMD. This makes
* sure we give the dock and the sink devices to complete its power cycle
* and then try HDCP authentication. The values of 10 and delay of 50ms
@@ -2188,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
@@ -2560,7 +2573,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
/*
* During the HDCP encryption session if Type change is requested,
- * disable the HDCP and reenable it with new TYPE value.
+ * disable the HDCP and re-enable it with new TYPE value.
*/
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ed29dd0ccef0..95584b61cf08 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1909,18 +1909,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
return MODE_CLOCK_RANGE;
- /*
- * SNPS PHYs' MPLLB table-based programming can only handle a fixed
- * set of link rates.
- *
- * FIXME: We will hopefully get an algorithmic way of programming
- * the MPLLB for HDMI in the future.
- */
- if (DISPLAY_VER(display) >= 14)
- return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
- else if (IS_DG2(dev_priv))
- return intel_snps_phy_check_hdmi_link_rate(clock);
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 3adc791d3776..c0d48f651dab 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -806,7 +806,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
* of the powerwells.
*
* Since this function can get called in contexts where we're already holding
- * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
* worker.
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
@@ -823,7 +823,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
/*
* We might already be holding dev->mode_config.mutex, so do this in a
- * seperate worker
+ * separate worker
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
@@ -844,7 +844,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* of the powerwells.
*
* Since this function can get called in contexts where we're already holding
- * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
* worker.
*
* Also used during driver init to initialize connector->polled
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 476ac88087e0..2137ac7b882a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -197,7 +197,7 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
* of an interrupt context. To avoid that read-modify-write cycles
- * interfer, these bits are protected by a spinlock. Since this
+ * interfere, these bits are protected by a spinlock. Since this
* function is usually not called from a context where the lock is
* held already, this function acquires the lock itself. A non-locking
* version is also available.
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 29705c159119..f4d60e77aa18 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -221,7 +221,7 @@ assert_link_limit_change_valid(struct intel_display *display,
* limits in @new_limits if there is a BW limitation.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @new_limits got updated
* with fallback values with which the configuration of all CRTCs
* in @state must be recomputed
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index f11626176fe2..59551c8414c2 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -77,12 +77,12 @@
#include "intel_lpe_audio.h"
#include "intel_pci_config.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
+#define HAS_LPE_AUDIO(display) ((display)->audio.lpe.platdev)
static struct platform_device *
-lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+lpe_audio_platdev_create(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
struct platform_device_info pinfo = {};
struct resource *rsc;
struct platform_device *platdev;
@@ -98,7 +98,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
+ rsc[0].start = display->audio.lpe.irq;
+ rsc[0].end = display->audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
@@ -109,7 +110,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
- pinfo.parent = dev_priv->drm.dev;
+ pinfo.parent = display->drm->dev;
pinfo.name = "hdmi-lpe-audio";
pinfo.id = -1;
pinfo.res = rsc;
@@ -118,8 +119,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
- pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+ pdata->num_pipes = INTEL_NUM_PIPES(display);
+ pdata->num_ports = display->platform.cherryview ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
pdata->port[2].pipe = -1;
@@ -130,7 +131,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -140,7 +141,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return platdev;
}
-static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+static void lpe_audio_platdev_destroy(struct intel_display *display)
{
/* XXX Note that platform_device_register_full() allocates a dma_mask
* and never frees it. We can't free it here as we cannot guarantee
@@ -150,7 +151,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->display.audio.lpe.platdev);
+ platform_device_unregister(display->audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,11 +168,12 @@ static struct irq_chip lpe_audio_irqchip = {
.irq_unmask = lpe_audio_irq_unmask,
};
-static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
+static int lpe_audio_irq_init(struct intel_display *display)
{
- int irq = dev_priv->display.audio.lpe.irq;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int irq = display->audio.lpe.irq;
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
&lpe_audio_irqchip,
handle_simple_irq,
@@ -180,11 +182,11 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
return irq_set_chip_data(irq, dev_priv);
}
-static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
+static bool lpe_audio_detect(struct intel_display *display)
{
int lpe_present = false;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
static const struct pci_device_id atom_hdaudio_ids[] = {
/* Baytrail */
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
@@ -194,7 +196,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
@@ -202,34 +204,34 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
return lpe_present;
}
-static int lpe_audio_setup(struct drm_i915_private *dev_priv)
+static int lpe_audio_setup(struct intel_display *display)
{
int ret;
- dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
- if (dev_priv->display.audio.lpe.irq < 0) {
- drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->display.audio.lpe.irq);
- ret = dev_priv->display.audio.lpe.irq;
+ display->audio.lpe.irq = irq_alloc_desc(0);
+ if (display->audio.lpe.irq < 0) {
+ drm_err(display->drm, "Failed to allocate IRQ desc: %d\n",
+ display->audio.lpe.irq);
+ ret = display->audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
+ drm_dbg(display->drm, "irq = %d\n", display->audio.lpe.irq);
- ret = lpe_audio_irq_init(dev_priv);
+ ret = lpe_audio_irq_init(display);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
- dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+ display->audio.lpe.platdev = lpe_audio_platdev_create(display);
- if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
- ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
- drm_err(&dev_priv->drm,
+ if (IS_ERR(display->audio.lpe.platdev)) {
+ ret = PTR_ERR(display->audio.lpe.platdev);
+ drm_err(display->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
@@ -238,54 +240,54 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
/* enable chicken bit; at least this is required for Dell Wyse 3040
* with DP outputs (but only sometimes by some reason!)
*/
- intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ intel_de_write(display, VLV_AUD_CHICKEN_BIT_REG,
VLV_CHICKEN_BIT_DBG_ENABLE);
return 0;
err_free_irq:
- irq_free_desc(dev_priv->display.audio.lpe.irq);
+ irq_free_desc(display->audio.lpe.irq);
err:
- dev_priv->display.audio.lpe.irq = -1;
- dev_priv->display.audio.lpe.platdev = NULL;
+ display->audio.lpe.irq = -1;
+ display->audio.lpe.platdev = NULL;
return ret;
}
/**
* intel_lpe_audio_irq_handler() - forwards the LPE audio irq
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* the LPE Audio irq is forwarded to the irq handler registered by LPE audio
* driver.
*/
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_irq_handler(struct intel_display *display)
{
int ret;
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
+ ret = generic_handle_irq(display->audio.lpe.irq);
if (ret)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"error handling LPE audio irq: %d\n", ret);
}
/**
* intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
* driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* Return: 0 if successful. non-zero if detection or
 * allocation/initialization fails
*/
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+int intel_lpe_audio_init(struct intel_display *display)
{
int ret = -ENODEV;
- if (lpe_audio_detect(dev_priv)) {
- ret = lpe_audio_setup(dev_priv);
+ if (lpe_audio_detect(display)) {
+ ret = lpe_audio_setup(display);
if (ret < 0)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"failed to setup LPE Audio bridge\n");
}
return ret;
@@ -294,27 +296,27 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
/**
* intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
* audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* release all the resources for LPE audio <-> i915 bridge.
*/
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_teardown(struct intel_display *display)
{
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- lpe_audio_platdev_destroy(dev_priv);
+ lpe_audio_platdev_destroy(display);
- irq_free_desc(dev_priv->display.audio.lpe.irq);
+ irq_free_desc(display->audio.lpe.irq);
- dev_priv->display.audio.lpe.irq = -1;
- dev_priv->display.audio.lpe.platdev = NULL;
+ display->audio.lpe.irq = -1;
+ display->audio.lpe.platdev = NULL;
}
/**
* intel_lpe_audio_notify() - notify lpe audio event
* audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
* @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
@@ -323,7 +325,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
*
* Notify lpe audio driver of eld change.
*/
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
@@ -332,15 +334,15 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
struct intel_hdmi_lpe_audio_port_pdata *ppdata;
u32 audio_enable;
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
+ pdata = dev_get_platdata(&display->audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
- audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
+ audio_enable = intel_de_read(display, VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -349,7 +351,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
- intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ intel_de_write(display, VLV_AUD_PORT_EN_DBG(port),
audio_enable & ~VLV_AMP_MUTE);
} else {
memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
@@ -358,12 +360,12 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
- intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ intel_de_write(display, VLV_AUD_PORT_EN_DBG(port),
audio_enable | VLV_AMP_MUTE);
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(display->audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index 2c5fcb6e1fd0..5234e11fd662 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -10,27 +10,27 @@
enum port;
enum transcoder;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+int intel_lpe_audio_init(struct intel_display *display);
+void intel_lpe_audio_teardown(struct intel_display *display);
+void intel_lpe_audio_irq_handler(struct intel_display *display);
+void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#else
-static inline int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+static inline int intel_lpe_audio_init(struct intel_display *display)
{
return -ENODEV;
}
-static inline void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+static inline void intel_lpe_audio_teardown(struct intel_display *display)
{
}
-static inline void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+static inline void intel_lpe_audio_irq_handler(struct intel_display *display)
{
}
-static inline void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+static inline void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d75dd17fad32..0c8bf477c2fb 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -212,7 +212,8 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
- err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode);
+ err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode,
+ lspcon_get_mode_settle_timeout(lspcon));
if (err < 0) {
drm_err(display->drm, "LSPCON mode change failed\n");
return err;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 9a2bea19f17b..10cdfdad82e4 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -15,6 +15,7 @@
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
+#include "intel_cmtg.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
@@ -978,6 +979,8 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_pch_sanitize(i915);
+ intel_cmtg_sanitize(display);
+
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index ca30fff61876..bbb0db33740e 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -46,7 +46,8 @@
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
* UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use
- * the mininum of both. */
+ * the minimum of both.
+ */
#define IMAGE_MAX_WIDTH 2048
#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
/* on 830 and 845 these large limits result in the card hanging */
@@ -408,10 +409,12 @@ static int intel_overlay_off(struct intel_overlay *overlay)
drm_WARN_ON(display->drm, !overlay->active);
- /* According to intel docs the overlay hw may hang (when switching
+ /*
+ * According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
- * of the hw. Do it in both cases */
+ * of the hw. Do it in both cases.
+ */
flip_addr |= OFC_UPDATE;
rq = alloc_request(overlay, intel_overlay_off_tail);
@@ -442,16 +445,19 @@ static int intel_overlay_off(struct intel_overlay *overlay)
return i915_active_wait(&overlay->last_flip);
}
-/* recover from an interruption due to a signal
- * We have to be careful not to repeat work forever an make forward progess. */
+/*
+ * Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and make forward progress.
+ */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
return i915_active_wait(&overlay->last_flip);
}
-/* Wait for pending overlay flip and release old frame.
+/*
+ * Wait for pending overlay flip and release old frame.
* Needs to be called before the overlay register are changed
- * via intel_overlay_(un)map_regs
+ * via intel_overlay_(un)map_regs.
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 975520322136..63301a01906c 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -609,7 +609,7 @@ intel_pmdemand_program_params(struct intel_display *display,
goto unlock;
drm_dbg_kms(display->drm,
- "initate pmdemand request values: (0x%x 0x%x)\n",
+ "initiate pmdemand request values: (0x%x 0x%x)\n",
mod_reg1, mod_reg2);
intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index eb35f0249f2b..c0f65749a3f6 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -1501,8 +1501,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
if (!pps_delays_valid(vbt))
return;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Ocassionally the panel
+ /*
+ * On Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 0b021acb330f..2bdb6c9c2283 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -154,7 +154,7 @@
*
* Unfortunately CHICKEN_TRANS itself seems to be double buffered
* and thus won't latch until the first vblank. So with DC states
- * enabled the register effctively uses the reset value during DC5
+ * enabled the register effectively uses the reset value during DC5
* exit+PSR exit sequence, and thus the bit does nothing until
* latched by the vblank that it was trying to prevent from being
* generated in the first place. So we should probably call this
@@ -171,7 +171,7 @@
* CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
*
* On BDW without this bit is no vblanks whatsoever are
- * generated after PSR exit. On HSW this has no apparant effect.
+ * generated after PSR exit. On HSW this has no apparent effect.
* WaPsrDPRSUnmaskVBlankInSRD says to set this.
*
* The rest of the bits are more self-explanatory and/or
@@ -185,7 +185,7 @@
* has_psr + has_panel_replay: Panel Replay
* has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
*
- * Description of some intel_psr varibles. enabled, panel_replay_enabled,
+ * Description of some intel_psr variables. enabled, panel_replay_enabled,
* sel_update_enabled
*
* enabled (alone): PSR1
@@ -814,8 +814,8 @@ static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
intel_psr_enable_sink_alpm(intel_dp, crtc_state);
@@ -827,6 +827,13 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
+void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
+{
+ if (CAN_PANEL_REPLAY(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+}
+
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1043,7 +1050,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
};
/*
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
- * comments bellow for more information
+ * comments below for more information
*/
int tmp;
@@ -1991,18 +1998,25 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled) {
+ if (intel_dp->psr.panel_replay_enabled)
drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
- } else {
+ else
drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
- /*
- * Panel replay has to be enabled before link training: doing it
- * only for PSR here.
- */
- intel_psr_enable_sink(intel_dp, crtc_state);
- }
+ /*
+ * Enabling here only for PSR. Panel Replay enable bit is already
+ * written at this point. See
+ * intel_psr_panel_replay_enable_sink. Modifiers/options:
+ * - Selective Update
+ * - Region Early Transport
+ * - Selective Update Region Scanline Capture
+ * - VSC_SDP_CRC
+ * - HPD on different Errors
+ * - CRC verification
+ * are written for PSR and Panel Replay here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dig_port->base, true);
@@ -2808,6 +2822,8 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
psr->su_region_et_enabled;
+ needs_to_disable |= new_crtc_state->has_panel_replay !=
+ psr->panel_replay_enabled;
needs_to_disable |= DISPLAY_VER(i915) < 11 &&
new_crtc_state->wm_level_disabled;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 956be263c09e..5f1671d02d76 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -28,8 +28,7 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder);
bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 498b35ec4e0f..c78da5a2b559 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1741,8 +1741,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
* the sdvo port register, on all other platforms it is part of the dpll
* state. Since the general pipe state readout happens before the
- * encoder->get_config we so already have a valid pixel multplier on all
- * other platfroms.
+ * encoder->get_config we so already have a valid pixel multiplier on all
+ * other platforms.
*/
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index 54f099abefeb..56c4551abefd 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -244,7 +244,7 @@ struct intel_sdvo_set_target_input_args {
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
- * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
* GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
*/
#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
new file mode 100644
index 000000000000..c6321dafef4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Synopsys, Inc., Intel Corporation
+ */
+
+#include <linux/math.h>
+
+#include "intel_cx0_phy_regs.h"
+#include "intel_display_types.h"
+#include "intel_snps_phy.h"
+#include "intel_snps_phy_regs.h"
+#include "intel_snps_hdmi_pll.h"
+
+#define INTEL_SNPS_PHY_HDMI_4999MHZ 4999999900ULL
+#define INTEL_SNPS_PHY_HDMI_16GHZ 16000000000ULL
+#define INTEL_SNPS_PHY_HDMI_9999MHZ (2 * INTEL_SNPS_PHY_HDMI_4999MHZ)
+
+#define CURVE0_MULTIPLIER 1000000000
+#define CURVE1_MULTIPLIER 100
+#define CURVE2_MULTIPLIER 1000000000000ULL
+
+struct pll_output_params {
+ u32 ssc_up_spread;
+ u32 mpll_div5_en;
+ u32 hdmi_div;
+ u32 ana_cp_int;
+ u32 ana_cp_prop;
+ u32 refclk_postscalar;
+ u32 tx_clk_div;
+ u32 fracn_quot;
+ u32 fracn_rem;
+ u32 fracn_den;
+ u32 fracn_en;
+ u32 pmix_en;
+ u32 multiplier;
+ int mpll_ana_v2i;
+ int ana_freq_vco;
+};
+
+static s64 interp(s64 x, s64 x1, s64 x2, s64 y1, s64 y2)
+{
+ s64 dydx;
+
+ dydx = DIV_ROUND_UP_ULL((y2 - y1) * 100000, (x2 - x1));
+
+ return (y1 + DIV_ROUND_UP_ULL(dydx * (x - x1), 100000));
+}
+
+static void get_ana_cp_int_prop(u32 vco_clk,
+ u32 refclk_postscalar,
+ int mpll_ana_v2i,
+ int c, int a,
+ const u64 curve_freq_hz[2][8],
+ const u64 curve_0[2][8],
+ const u64 curve_1[2][8],
+ const u64 curve_2[2][8],
+ u32 *ana_cp_int,
+ u32 *ana_cp_prop)
+{
+ u64 vco_div_refclk_float;
+ u64 curve_0_interpolated;
+ u64 curve_2_interpolated;
+ u64 curve_1_interpolated;
+ u64 curve_2_scaled1;
+ u64 curve_2_scaled2;
+ u64 adjusted_vco_clk1;
+ u64 adjusted_vco_clk2;
+ u64 curve_2_scaled_int;
+ u64 interpolated_product;
+ u64 scaled_interpolated_sqrt;
+ u64 scaled_vco_div_refclk1;
+ u64 scaled_vco_div_refclk2;
+ u64 ana_cp_int_temp;
+ u64 temp;
+
+ vco_div_refclk_float = vco_clk * DIV_ROUND_DOWN_ULL(1000000000000ULL, refclk_postscalar);
+
+ /* Interpolate curve values at the target vco_clk frequency */
+ curve_0_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_0[c][a], curve_0[c][a + 1]);
+
+ curve_2_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_2[c][a], curve_2[c][a + 1]);
+
+ curve_1_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_1[c][a], curve_1[c][a + 1]);
+
+ curve_1_interpolated = DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE1_MULTIPLIER);
+
+ /*
+ * Scale curve_2_interpolated based on mpll_ana_v2i, for integer part
+ * ana_cp_int and for the proportional part ana_cp_prop
+ */
+ temp = curve_2_interpolated * (4 - mpll_ana_v2i);
+ curve_2_scaled1 = DIV_ROUND_DOWN_ULL(temp, 16000);
+ curve_2_scaled2 = DIV_ROUND_DOWN_ULL(temp, 160);
+
+ /* Scale vco_div_refclk for ana_cp_int */
+ scaled_vco_div_refclk1 = 112008301 * DIV_ROUND_DOWN_ULL(vco_div_refclk_float, 100000);
+
+ adjusted_vco_clk1 = CURVE2_MULTIPLIER *
+ DIV_ROUND_DOWN_ULL(scaled_vco_div_refclk1, (curve_0_interpolated *
+ DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
+
+ ana_cp_int_temp =
+ DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
+ CURVE2_MULTIPLIER);
+
+ *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
+
+ curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int);
+
+ interpolated_product = curve_1_interpolated *
+ (curve_2_scaled_int * DIV_ROUND_DOWN_ULL(curve_0_interpolated,
+ CURVE0_MULTIPLIER));
+
+ scaled_interpolated_sqrt =
+ int_sqrt(DIV_ROUND_UP_ULL(interpolated_product, vco_div_refclk_float) *
+ DIV_ROUND_DOWN_ULL(1000000000000ULL, 55));
+
+ /* Scale vco_div_refclk for ana_cp_prop */
+ scaled_vco_div_refclk2 = DIV_ROUND_UP_ULL(vco_div_refclk_float, 1000000);
+ adjusted_vco_clk2 = 1460281 * DIV_ROUND_UP_ULL(scaled_interpolated_sqrt *
+ scaled_vco_div_refclk2,
+ curve_1_interpolated);
+
+ *ana_cp_prop = DIV_ROUND_UP_ULL(adjusted_vco_clk2, curve_2_scaled2);
+ *ana_cp_prop = max(1, min(*ana_cp_prop, 127));
+}
+
+static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
+ u32 ref_range,
+ u32 ana_cp_int_gs,
+ u32 ana_cp_prop_gs,
+ const u64 curve_freq_hz[2][8],
+ const u64 curve_0[2][8],
+ const u64 curve_1[2][8],
+ const u64 curve_2[2][8],
+ u32 prescaler_divider,
+ struct pll_output_params *pll_params)
+{
+ u64 datarate = pixel_clock * 10000;
+ u32 ssc_up_spread = 1;
+ u32 mpll_div5_en = 1;
+ u32 hdmi_div = 1;
+ u32 ana_cp_int;
+ u32 ana_cp_prop;
+ u32 refclk_postscalar = refclk >> prescaler_divider;
+ u32 tx_clk_div;
+ u64 vco_clk;
+ u64 vco_clk_do_div;
+ u32 vco_div_refclk_integer;
+ u32 vco_div_refclk_fracn;
+ u32 fracn_quot;
+ u32 fracn_rem;
+ u32 fracn_den;
+ u32 fracn_en;
+ u32 pmix_en;
+ u32 multiplier;
+ int mpll_ana_v2i;
+ int ana_freq_vco = 0;
+ int c, a = 0;
+ int i;
+
+ /* Select appropriate v2i point */
+ if (datarate <= INTEL_SNPS_PHY_HDMI_9999MHZ) {
+ mpll_ana_v2i = 2;
+ tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
+ } else {
+ mpll_ana_v2i = 3;
+ tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
+ }
+ vco_clk = (datarate << tx_clk_div) >> 1;
+
+ vco_div_refclk_integer = DIV_ROUND_DOWN_ULL(vco_clk, refclk_postscalar);
+ vco_clk_do_div = do_div(vco_clk, refclk_postscalar);
+ vco_div_refclk_fracn = DIV_ROUND_DOWN_ULL(vco_clk_do_div << 32, refclk_postscalar);
+
+ fracn_quot = vco_div_refclk_fracn >> 16;
+ fracn_rem = vco_div_refclk_fracn & 0xffff;
+ fracn_rem = fracn_rem - (fracn_rem >> 15);
+ fracn_den = 0xffff;
+ fracn_en = (fracn_quot != 0 || fracn_rem != 0) ? 1 : 0;
+ pmix_en = fracn_en;
+ multiplier = (vco_div_refclk_integer - 16) * 2;
+ /* Curve selection for ana_cp_* calculations. One curve hardcoded per v2i range */
+ c = mpll_ana_v2i - 2;
+
+ /* Find the right segment of the table */
+ for (i = 0; i < 8; i += 2) {
+ if (vco_clk <= curve_freq_hz[c][i + 1]) {
+ a = i;
+ ana_freq_vco = 3 - (a >> 1);
+ break;
+ }
+ }
+
+ get_ana_cp_int_prop(vco_clk, refclk_postscalar, mpll_ana_v2i, c, a,
+ curve_freq_hz, curve_0, curve_1, curve_2,
+ &ana_cp_int, &ana_cp_prop);
+
+ pll_params->ssc_up_spread = ssc_up_spread;
+ pll_params->mpll_div5_en = mpll_div5_en;
+ pll_params->hdmi_div = hdmi_div;
+ pll_params->ana_cp_int = ana_cp_int;
+ pll_params->refclk_postscalar = refclk_postscalar;
+ pll_params->tx_clk_div = tx_clk_div;
+ pll_params->fracn_quot = fracn_quot;
+ pll_params->fracn_rem = fracn_rem;
+ pll_params->fracn_den = fracn_den;
+ pll_params->fracn_en = fracn_en;
+ pll_params->pmix_en = pmix_en;
+ pll_params->multiplier = multiplier;
+ pll_params->ana_cp_prop = ana_cp_prop;
+ pll_params->mpll_ana_v2i = mpll_ana_v2i;
+ pll_params->ana_freq_vco = ana_freq_vco;
+}
+
+void intel_snps_hdmi_pll_compute_mpllb(struct intel_mpllb_state *pll_state, u64 pixel_clock)
+{
+ /* x axis frequencies. One curve in each array per v2i point */
+ static const u64 dg2_curve_freq_hz[2][8] = {
+ { 2500000000ULL, 3000000000ULL, 3000000000ULL, 3500000000ULL, 3500000000ULL,
+ 4000000000ULL, 4000000000ULL, 5000000000ULL },
+ { 4000000000ULL, 4600000000ULL, 4601000000ULL, 5400000000ULL, 5401000000ULL,
+ 6600000000ULL, 6601000000ULL, 8001000000ULL }
+ };
+
+ /* y axis heights multiplied with 1000000000 */
+ static const u64 dg2_curve_0[2][8] = {
+ { 34149871, 39803269, 36034544, 40601014, 35646940, 40016109, 35127987, 41889522 },
+ { 70000000, 78770454, 70451838, 80427119, 70991400, 84230173, 72945921, 87064218 }
+ };
+
+ /* Multiplied with 100 */
+ static const u64 dg2_curve_1[2][8] = {
+ { 85177000000000ULL, 79385227160000ULL, 95672603580000ULL, 88857207160000ULL,
+ 109379790900000ULL, 103528193900000ULL, 131941242400000ULL, 117279000000000ULL },
+ { 60255000000000ULL, 55569000000000ULL, 72036000000000ULL, 69509000000000ULL,
+ 81785000000000ULL, 731030000000000ULL, 96591000000000ULL, 69077000000000ULL }
+ };
+
+ /* Multiplied with 1000000000000 */
+ static const u64 dg2_curve_2[2][8] = {
+ { 2186930000ULL, 2835287134ULL, 2395395343ULL, 2932270687ULL, 2351887545ULL,
+ 2861031697ULL, 2294149152ULL, 3091730000ULL },
+ { 4560000000ULL, 5570000000ULL, 4610000000ULL, 5770000000ULL, 4670000000ULL,
+ 6240000000ULL, 4890000000ULL, 6600000000ULL }
+ };
+
+ struct pll_output_params pll_params;
+ u32 refclk = 100000000;
+ u32 prescaler_divider = 1;
+ u32 ref_range = 3;
+ u32 ana_cp_int_gs = 64;
+ u32 ana_cp_prop_gs = 124;
+
+ compute_hdmi_tmds_pll(pixel_clock, refclk, ref_range, ana_cp_int_gs, ana_cp_prop_gs,
+ dg2_curve_freq_hz, dg2_curve_0, dg2_curve_1, dg2_curve_2,
+ prescaler_divider, &pll_params);
+
+ pll_state->clock = pixel_clock;
+ pll_state->ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, ref_range);
+ pll_state->mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, pll_params.ana_cp_int) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, pll_params.ana_cp_prop) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, ana_cp_int_gs) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, ana_cp_prop_gs);
+ pll_state->mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, pll_params.mpll_div5_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, pll_params.tx_clk_div) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, pll_params.pmix_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, pll_params.mpll_ana_v2i) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, pll_params.ana_freq_vco);
+ pll_state->mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, prescaler_divider) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, pll_params.multiplier) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, pll_params.hdmi_div);
+ pll_state->mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, pll_params.fracn_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, pll_params.fracn_den);
+ pll_state->mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, pll_params.fracn_quot) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, pll_params.fracn_rem);
+ pll_state->mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, pll_params.ssc_up_spread);
+}
+
+void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u64 pixel_clock)
+{
+ /* x axis frequencies. One curve in each array per v2i point */
+ static const u64 c10_curve_freq_hz[2][8] = {
+ { 2500000000ULL, 3000000000ULL, 3000000000ULL, 3500000000ULL, 3500000000ULL,
+ 4000000000ULL, 4000000000ULL, 5000000000ULL },
+ { 4000000000ULL, 4600000000ULL, 4601000000ULL, 5400000000ULL, 5401000000ULL,
+ 6600000000ULL, 6601000000ULL, 8001000000ULL }
+ };
+
+ /* y axis heights multiplied with 1000000000 */
+ static const u64 c10_curve_0[2][8] = {
+ { 41174500, 48605500, 42973700, 49433100, 42408600, 47681900, 40297400, 49131400 },
+ { 82056800, 94420700, 82323400, 96370600, 81273300, 98630100, 81728700, 99105700}
+ };
+
+ static const u64 c10_curve_1[2][8] = {
+ { 73300000000000ULL, 66000000000000ULL, 83100000000000ULL, 75300000000000ULL,
+ 99700000000000ULL, 92300000000000ULL, 125000000000000ULL, 110000000000000ULL },
+ { 53700000000000ULL, 47700000000000ULL, 62200000000000ULL, 54400000000000ULL,
+ 75100000000000ULL, 63400000000000ULL, 90600000000000ULL, 76300000000000ULL }
+ };
+
+ /* Multiplied with 1000000000000 */
+ static const u64 c10_curve_2[2][8] = {
+ { 2415790000ULL, 3136460000ULL, 2581990000ULL, 3222670000ULL, 2529330000ULL,
+ 3042020000ULL, 2336970000ULL, 3191460000ULL},
+ { 4808390000ULL, 5994250000ULL, 4832730000ULL, 6193730000ULL, 4737700000ULL,
+ 6428750000ULL, 4779200000ULL, 6479340000ULL }
+ };
+
+ struct pll_output_params pll_params;
+ u32 refclk = 38400000;
+ u32 prescaler_divider = 0;
+ u32 ref_range = 1;
+ u32 ana_cp_int_gs = 30;
+ u32 ana_cp_prop_gs = 28;
+
+ compute_hdmi_tmds_pll(pixel_clock, refclk, ref_range,
+ ana_cp_int_gs, ana_cp_prop_gs,
+ c10_curve_freq_hz, c10_curve_0,
+ c10_curve_1, c10_curve_2, prescaler_divider,
+ &pll_params);
+
+ pll_state->tx = 0x10;
+ pll_state->cmn = 0x1;
+ pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
+ REG_FIELD_PREP(C10_PLL0_FRACEN, pll_params.fracn_en) |
+ REG_FIELD_PREP(C10_PLL0_PMIX_EN, pll_params.pmix_en) |
+ REG_FIELD_PREP(C10_PLL0_ANA_FREQ_VCO_MASK, pll_params.ana_freq_vco);
+ pll_state->pll[2] = REG_FIELD_PREP(C10_PLL2_MULTIPLIERL_MASK, pll_params.multiplier);
+ pll_state->pll[3] = REG_FIELD_PREP(C10_PLL3_MULTIPLIERH_MASK, pll_params.multiplier >> 8);
+ pll_state->pll[8] = REG_FIELD_PREP(C10_PLL8_SSC_UP_SPREAD, pll_params.ssc_up_spread);
+ pll_state->pll[9] = REG_FIELD_PREP(C10_PLL9_FRACN_DENL_MASK, pll_params.fracn_den);
+ pll_state->pll[10] = REG_FIELD_PREP(C10_PLL10_FRACN_DENH_MASK, pll_params.fracn_den >> 8);
+ pll_state->pll[11] = REG_FIELD_PREP(C10_PLL11_FRACN_QUOT_L_MASK, pll_params.fracn_quot);
+ pll_state->pll[12] = REG_FIELD_PREP(C10_PLL12_FRACN_QUOT_H_MASK,
+ pll_params.fracn_quot >> 8);
+
+ pll_state->pll[13] = REG_FIELD_PREP(C10_PLL13_FRACN_REM_L_MASK, pll_params.fracn_rem);
+ pll_state->pll[14] = REG_FIELD_PREP(C10_PLL14_FRACN_REM_H_MASK, pll_params.fracn_rem >> 8);
+ pll_state->pll[15] = REG_FIELD_PREP(C10_PLL15_TXCLKDIV_MASK, pll_params.tx_clk_div) |
+ REG_FIELD_PREP(C10_PLL15_HDMIDIV_MASK, pll_params.hdmi_div);
+ pll_state->pll[16] = REG_FIELD_PREP(C10_PLL16_ANA_CPINT, pll_params.ana_cp_int) |
+ REG_FIELD_PREP(C10_PLL16_ANA_CPINTGS_L, ana_cp_int_gs);
+ pll_state->pll[17] = REG_FIELD_PREP(C10_PLL17_ANA_CPINTGS_H_MASK, ana_cp_int_gs >> 1) |
+ REG_FIELD_PREP(C10_PLL17_ANA_CPPROP_L_MASK, pll_params.ana_cp_prop);
+ pll_state->pll[18] =
+ REG_FIELD_PREP(C10_PLL18_ANA_CPPROP_H_MASK, pll_params.ana_cp_prop >> 2) |
+ REG_FIELD_PREP(C10_PLL18_ANA_CPPROPGS_L_MASK, ana_cp_prop_gs);
+
+ pll_state->pll[19] = REG_FIELD_PREP(C10_PLL19_ANA_CPPROPGS_H_MASK, ana_cp_prop_gs >> 3) |
+ REG_FIELD_PREP(C10_PLL19_ANA_V2I_MASK, pll_params.mpll_ana_v2i);
+}
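For orientation, a minimal standalone sketch (not part of the patch; the 594 MHz pixel clock and the kHz unit for pixel_clock are illustrative assumptions) of how compute_hdmi_tmds_pll() above selects the v2i point, TX clock divider and VCO frequency:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical input: 594000 kHz pixel clock -> 5.94 GHz TMDS bit rate */
	uint64_t datarate = 594000ULL * 10000;
	const uint64_t limit_9999mhz = 2 * 4999999900ULL;	/* INTEL_SNPS_PHY_HDMI_9999MHZ */
	const uint64_t limit_16ghz = 16000000000ULL;		/* INTEL_SNPS_PHY_HDMI_16GHZ */
	unsigned int tx_clk_div = 0;
	int mpll_ana_v2i;
	uint64_t q, vco_clk;

	if (datarate <= limit_9999mhz) {
		mpll_ana_v2i = 2;
		q = limit_9999mhz / datarate;
	} else {
		mpll_ana_v2i = 3;
		q = limit_16ghz / datarate;
	}
	while (q >>= 1)		/* open-coded ilog2() of the quotient */
		tx_clk_div++;

	vco_clk = (datarate << tx_clk_div) >> 1;
	printf("v2i point %d, tx_clk_div %u, vco %llu Hz\n",	/* 2, 0, 2970000000 */
	       mpll_ana_v2i, tx_clk_div, (unsigned long long)vco_clk);
	return 0;
}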
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h
new file mode 100644
index 000000000000..aac70c4bb0f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Synopsys, Inc., Intel Corporation
+ */
+
+#ifndef __INTEL_SNPS_HDMI_PLL_H__
+#define __INTEL_SNPS_HDMI_PLL_H__
+
+#include <linux/types.h>
+
+struct intel_c10pll_state;
+struct intel_mpllb_state;
+
+void intel_snps_hdmi_pll_compute_mpllb(struct intel_mpllb_state *pll_state, u64 pixel_clock);
+void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u64 pixel_clock);
+
+#endif /* __INTEL_SNPS_HDMI_PLL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 41fe26dc200b..353221d3e29f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -11,6 +11,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
#include "intel_snps_phy_regs.h"
@@ -521,7 +522,7 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
-/* values in the below table are calculted using the algo */
+/* values in the below table are calculated using the algo */
static const struct intel_mpllb_state dg2_hdmi_25200 = {
.clock = 25200,
.ref_control =
@@ -1788,24 +1789,9 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_mpllb_state * const *tables;
int i;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- if (intel_snps_phy_check_hdmi_link_rate(crtc_state->port_clock)
- != MODE_OK) {
- /*
- * FIXME: Can only support fixed HDMI frequencies
- * until we have a proper algorithm under a valid
- * license.
- */
- drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n",
- crtc_state->port_clock);
- return -EINVAL;
- }
- }
-
tables = intel_mpllb_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
@@ -1817,6 +1803,14 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
}
}
+ /* For HDMI PLLs try SNPS PHY algorithm, if there are no precomputed tables */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ intel_snps_hdmi_pll_compute_mpllb(&crtc_state->dpll_hw_state.mpllb,
+ crtc_state->port_clock);
+
+ return 0;
+ }
+
return -EINVAL;
}
@@ -1982,19 +1976,6 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
pll_state->mpllb_div &= ~SNPS_PHY_MPLLB_FORCE_EN;
}
-int intel_snps_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_mpllb_state * const *tables = dg2_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
-}
-
void intel_mpllb_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index bc08b92a7cd9..1dd564ed9fa8 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -30,7 +30,6 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
const struct intel_mpllb_state *pll_state);
-int intel_snps_phy_check_hdmi_link_rate(int clock);
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mpllb_state_verify(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6fadcef58e0..13996d7059ad 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -253,21 +253,6 @@ int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
-static unsigned int vlv_sprite_min_alignment(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane)
-{
- switch (fb->modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return 4 * 1024;
- case DRM_FORMAT_MOD_LINEAR:
- return 128 * 1024;
- default:
- MISSING_CASE(fb->modifier);
- return 0;
- }
-}
-
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1616,7 +1601,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
plane->max_stride = i965_plane_max_stride;
- plane->min_alignment = vlv_sprite_min_alignment;
+ plane->min_alignment = vlv_plane_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index a95fb3349eba..4efd4f7d497a 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -369,7 +369,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/*
* Already exiting vblank? If so, shift our position
- * so it looks like we're already apporaching the full
+ * so it looks like we're already approaching the full
* vblank end. This should make the generated timestamp
* more or less match when the active portion will start.
*/
@@ -507,6 +507,23 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
wait_for_pipe_scanline_moving(crtc, true);
}
+static void intel_crtc_active_timings(struct drm_display_mode *mode,
+ int *vmax_vblank_start,
+ const struct intel_crtc_state *crtc_state,
+ bool vrr_enable)
+{
+ drm_mode_init(mode, &crtc_state->hw.adjusted_mode);
+ *vmax_vblank_start = 0;
+
+ if (!vrr_enable)
+ return;
+
+ mode->crtc_vtotal = intel_vrr_vmax_vtotal(crtc_state);
+ mode->crtc_vblank_end = intel_vrr_vmax_vtotal(crtc_state);
+ mode->crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ *vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+}
+
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable)
{
@@ -517,19 +534,13 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
int vmax_vblank_start = 0;
unsigned long irqflags;
- drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
-
- if (vrr_enable) {
- drm_WARN_ON(display->drm,
- (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ intel_crtc_active_timings(&adjusted_mode, &vmax_vblank_start,
+ crtc_state, vrr_enable);
- adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- } else {
+ if (vrr_enable)
+ drm_WARN_ON(display->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ else
mode_flags &= ~I915_MODE_FLAG_VRR;
- }
/*
* Belts and suspenders locking to guarantee everyone sees 100%
@@ -597,6 +608,37 @@ int intel_mode_vtotal(const struct drm_display_mode *mode)
return vtotal;
}
+int intel_mode_vblank_delay(const struct drm_display_mode *mode)
+{
+ return intel_mode_vblank_start(mode) - intel_mode_vdisplay(mode);
+}
+
+static const struct intel_crtc_state *
+pre_commit_crtc_state(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return new_crtc_state;
+ else
+ return old_crtc_state;
+}
+
+const struct intel_crtc_state *
+intel_pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return pre_commit_crtc_state(old_crtc_state, new_crtc_state);
+}
+
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
struct intel_vblank_evade_ctx *evade)
@@ -605,6 +647,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
+ int vblank_delay;
evade->crtc = crtc;
@@ -612,16 +655,8 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
display->platform.cherryview) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- *
- * TODO: maybe just use the active timings here?
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- crtc_state = new_crtc_state;
- else
- crtc_state = old_crtc_state;
+ /* TODO: maybe just use the active timings here? */
+ crtc_state = pre_commit_crtc_state(old_crtc_state, new_crtc_state);
adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -634,8 +669,12 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+
+ vblank_delay = intel_vrr_vblank_delay(crtc_state);
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
+
+ vblank_delay = intel_mode_vblank_delay(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
@@ -653,8 +692,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
*/
if (intel_color_uses_dsb(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- evade->min -= intel_mode_vblank_start(adjusted_mode) -
- intel_mode_vdisplay(adjusted_mode);
+ evade->min -= vblank_delay;
}
/* must be called with vblank interrupt already enabled! */
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 6d7336256982..21fbb08d61d5 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -11,6 +11,7 @@
struct drm_crtc;
struct drm_display_mode;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -24,6 +25,7 @@ int intel_mode_vdisplay(const struct drm_display_mode *mode);
int intel_mode_vblank_start(const struct drm_display_mode *mode);
int intel_mode_vblank_end(const struct drm_display_mode *mode);
int intel_mode_vtotal(const struct drm_display_mode *mode);
+int intel_mode_vblank_delay(const struct drm_display_mode *mode);
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
@@ -42,4 +44,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable);
int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state);
+const struct intel_crtc_state *
+intel_pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+
#endif /* __INTEL_VBLANK_H__ */
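A tiny sketch (hypothetical timings, not part of the patch) of what the new intel_mode_vblank_delay() helper evaluates to, i.e. the number of scanlines between the end of the active area and the start of vblank, ignoring the interlace handling done by the real mode helpers:

#include <stdio.h>

struct mode {
	int crtc_vdisplay;
	int crtc_vblank_start;
};

static int vblank_delay(const struct mode *m)
{
	return m->crtc_vblank_start - m->crtc_vdisplay;
}

int main(void)
{
	struct mode m = { .crtc_vdisplay = 1080, .crtc_vblank_start = 1084 };

	printf("vblank delay = %d scanlines\n", vblank_delay(&m));	/* 4 */
	return 0;
}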
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index b355c479eda3..932435a7f88d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -66,6 +66,13 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420);
}
+static int
+get_range_bpg_offset(int bpp_low, int offset_low, int bpp_high, int offset_high, int bpp)
+{
+ return offset_low + DIV_ROUND_UP((offset_high - offset_low) * (bpp - bpp_low),
+ (bpp_high - bpp_low));
+}
+
/*
* We are using the method provided in DSC 1.2a C-Model in codec_main.c
* Above method use a common formula to derive values for any combination of DSC
@@ -83,7 +90,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;
- u32 res, buf_i, bpp_i;
+ u32 buf_i, bpp_i;
if (vdsc_cfg->slice_height >= 8)
first_line_bpg_offset =
@@ -99,7 +106,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
* According to DSC 1.2 spec in Section 4.1 if native_420 is set:
* -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
* height < 8.
- * -second_line_offset_adj is 512 as shown by emperical values to yield best chroma
+ * -second_line_offset_adj is 512 as shown by empirical values to yield best chroma
* preservation in second line.
* -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded
* up to 16 fractional bits, we left shift second line offset by 11 to preserve 11
@@ -117,7 +124,6 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->slice_height - 1);
}
- /* Our hw supports only 444 modes as of today */
if (bpp >= 12)
vdsc_cfg->initial_offset = 2048;
else if (bpp >= 10)
@@ -163,23 +169,19 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
/* Calculate range_bpg_offset */
- if (bpp <= 8) {
+ if (bpp <= 8)
range_bpg_offset = ofs_und4[buf_i];
- } else if (bpp <= 10) {
- res = DIV_ROUND_UP(((bpp - 8) *
- (ofs_und5[buf_i] - ofs_und4[buf_i])), 2);
- range_bpg_offset = ofs_und4[buf_i] + res;
- } else if (bpp <= 12) {
- res = DIV_ROUND_UP(((bpp - 10) *
- (ofs_und6[buf_i] - ofs_und5[buf_i])), 2);
- range_bpg_offset = ofs_und5[buf_i] + res;
- } else if (bpp <= 16) {
- res = DIV_ROUND_UP(((bpp - 12) *
- (ofs_und8[buf_i] - ofs_und6[buf_i])), 4);
- range_bpg_offset = ofs_und6[buf_i] + res;
- } else {
+ else if (bpp <= 10)
+ range_bpg_offset = get_range_bpg_offset(8, ofs_und4[buf_i],
+ 10, ofs_und5[buf_i], bpp);
+ else if (bpp <= 12)
+ range_bpg_offset = get_range_bpg_offset(10, ofs_und5[buf_i],
+ 12, ofs_und6[buf_i], bpp);
+ else if (bpp <= 16)
+ range_bpg_offset = get_range_bpg_offset(12, ofs_und6[buf_i],
+ 16, ofs_und8[buf_i], bpp);
+ else
range_bpg_offset = ofs_und8[buf_i];
- }
vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
@@ -215,21 +217,19 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
/* Calculate range_bpg_offset */
- if (bpp <= 6) {
+ if (bpp <= 6)
range_bpg_offset = ofs_und6[buf_i];
- } else if (bpp <= 8) {
- res = DIV_ROUND_UP(((bpp - 6) *
- (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
- range_bpg_offset = ofs_und6[buf_i] + res;
- } else if (bpp <= 12) {
- range_bpg_offset = ofs_und8[buf_i];
- } else if (bpp <= 15) {
- res = DIV_ROUND_UP(((bpp - 12) *
- (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
- range_bpg_offset = ofs_und12[buf_i] + res;
- } else {
+ else if (bpp <= 8)
+ range_bpg_offset = get_range_bpg_offset(6, ofs_und6[buf_i],
+ 8, ofs_und8[buf_i], bpp);
+ else if (bpp <= 12)
+ range_bpg_offset = get_range_bpg_offset(8, ofs_und8[buf_i],
+ 12, ofs_und12[buf_i], bpp);
+ else if (bpp <= 15)
+ range_bpg_offset = get_range_bpg_offset(12, ofs_und12[buf_i],
+ 15, ofs_und15[buf_i], bpp);
+ else
range_bpg_offset = ofs_und15[buf_i];
- }
vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
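A brief standalone check (hypothetical offset values, not part of the patch) that the new get_range_bpg_offset() helper reproduces the per-band interpolation it replaces, here for the 12 < bpp <= 16 band:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same shape as get_range_bpg_offset() above */
static int range_bpg_offset(int bpp_low, int offset_low,
			    int bpp_high, int offset_high, int bpp)
{
	return offset_low + DIV_ROUND_UP((offset_high - offset_low) * (bpp - bpp_low),
					 (bpp_high - bpp_low));
}

int main(void)
{
	/* Hypothetical ofs_und6 = -2, ofs_und8 = 0 for one RC buffer entry, bpp = 13 */
	int old_way = -2 + DIV_ROUND_UP((13 - 12) * (0 - (-2)), 4);

	printf("old %d, helper %d\n", old_way,
	       range_bpg_offset(12, -2, 16, 0, 13));	/* both -1 */
	return 0;
}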
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 70088e355055..adb51609d0a3 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -7,9 +7,9 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
-#include "intel_dp.h"
#define FIXED_POINT_PRECISION 100
#define CMRR_PRECISION_TOLERANCE 10
@@ -75,6 +75,46 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
}
}
+static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.adjusted_mode.crtc_vblank_start -
+ crtc_state->hw.adjusted_mode.crtc_vdisplay;
+}
+
+static int intel_vrr_extra_vblank_delay(struct intel_display *display)
+{
+ /*
+ * On ICL/TGL VRR hardware inserts one extra scanline
+ * just after vactive, which pushes the vmin decision
+ * boundary ahead accordingly. We'll include the extra
+ * scanline in our vblank delay estimates to make sure
+ * that we never underestimate how long we have until
+ * the delayed vblank has passed.
+ */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
+}
+
+int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_real_vblank_delay(crtc_state) +
+ intel_vrr_extra_vblank_delay(display);
+}
+
+static int intel_vrr_flipline_offset(struct intel_display *display)
+{
+ /* ICL/TGL hardware imposes flipline>=vmin+1 */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
+}
+
+static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display);
+}
+
/*
* Without VRR registers get latched at:
* vblank_start
@@ -98,19 +138,41 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
if (DISPLAY_VER(display) >= 13)
return crtc_state->vrr.guardband;
else
- /* The hw imposes the extra scanline before frame start */
+ /* hardware imposes one extra scanline somewhere */
return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}
+int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /* Min vblank actually determined by flipline */
+ if (DISPLAY_VER(display) >= 13)
+ return intel_vrr_vmin_flipline(crtc_state);
+ else
+ return intel_vrr_vmin_flipline(crtc_state) +
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_state->vrr.vmax;
+ else
+ return crtc_state->vrr.vmax +
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
- /* Min vblank actually determined by flipline that is always >=vmin+1 */
- return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
}
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
}
static bool
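/*
 * Worked example (illustrative numbers, not from the patch): a mode with
 * vdisplay = 1080 and vblank_start = 1084 has a real vblank delay of 4
 * lines. With vrr.vmin = 1106 as finally programmed on an ICL/TGL part
 * (flipline offset 1), the helpers above give:
 *
 *	vmin_flipline     = 1106 + 1 = 1107
 *	vmin_vtotal       = 1107 + 4 = 1111
 *	vmin_vblank_start = 1111 - vblank_exit_length
 *
 * On display version 13+ both the flipline offset and the vblank-delay
 * term are 0, so vmin_vtotal is simply vrr.vmin.
 */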
@@ -202,15 +264,17 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (vmin >= vmax)
return;
+ crtc_state->vrr.vmin = vmin;
+ crtc_state->vrr.vmax = vmax;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
/*
* flipline determines the min vblank length the hardware will
- * generate, and flipline>=vmin+1, hence we reduce vmin by one
- * to make sure we can get the actual min vblank length.
+ * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
+ * vmin by one to make sure we can get the actual min vblank length.
*/
- crtc_state->vrr.vmin = vmin - 1;
- crtc_state->vrr.vmax = vmax;
-
- crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
/*
* When panel is VRR capable and userspace has
@@ -235,7 +299,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
- if (intel_dp->as_sdp_supported && crtc_state->vrr.enable) {
+ if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_start);
@@ -255,11 +319,20 @@ void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start;
} else {
+ /* hardware imposes one extra scanline somewhere */
crtc_state->vrr.pipeline_full =
min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
crtc_state->framestart_delay - 1);
+
+ /*
+ * vmin/vmax/flipline also need to be adjusted by
+ * the vblank delay to maintain correct vtotals.
+ */
+ crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state);
+ crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state);
+ crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state);
}
}
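/*
 * Tying this back to the example numbers used earlier (assumed, not from
 * the patch): a computed minimum vtotal of 1111 first loses the ICL/TGL
 * flipline offset (vmin = 1110, flipline = 1111) and then the 4-line real
 * vblank delay here (vmin = 1106, flipline = 1107); intel_vrr_vmin_vtotal()
 * then reconstructs 1107 + 4 = 1111, so the rest of the driver still sees
 * the real minimum vtotal.
 */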
@@ -315,9 +388,16 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
trans_vrr_ctl(crtc_state));
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
crtc_state->vrr.flipline - 1);
+
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder),
+ VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
+ VRR_VSYNC_START(crtc_state->vrr.vsync_start));
}
-void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+void intel_vrr_send_push(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -325,8 +405,15 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN | TRANS_PUSH_SEND);
+ if (dsb)
+ intel_dsb_nonpost_start(dsb);
+
+ intel_de_write_dsb(display, dsb,
+ TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+
+ if (dsb)
+ intel_dsb_nonpost_end(dsb);
}
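/*
 * Hedged usage sketch (the callers are outside this excerpt, and the DSB
 * handle name below is an assumption): the push can now be issued either
 * immediately via MMIO or queued into a DSB, e.g.
 *
 *	intel_vrr_send_push(NULL, crtc_state);
 *	intel_vrr_send_push(crtc_state->dsb_commit, crtc_state);
 *
 * In the DSB case the write is wrapped in a non-posted section, which
 * presumably makes the DSB wait for the TRANS_PUSH write to complete
 * before executing subsequent instructions.
 */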
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
@@ -351,12 +438,6 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (HAS_AS_SDP(display))
- intel_de_write(display,
- TRANS_VRR_VSYNC(display, cpu_transcoder),
- VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
- VRR_VSYNC_START(crtc_state->vrr.vsync_start));
-
if (crtc_state->cmrr.enable) {
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
@@ -381,10 +462,6 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
-
- if (HAS_AS_SDP(display))
- intel_de_write(display,
- TRANS_VRR_VSYNC(display, cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
@@ -424,10 +501,6 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
TRANS_VRR_VMAX(display, cpu_transcoder)) + 1;
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
- }
-
- if (crtc_state->vrr.enable) {
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
@@ -439,4 +512,7 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
}
}
+
+ if (crtc_state->vrr.enable)
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index b3b45c675020..899cbf40f880 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -12,6 +12,7 @@ struct drm_connector_state;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
+struct intel_dsb;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -22,11 +23,15 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
-void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ae21fce534dc..c8bf6fd92ce8 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_scaler.h"
@@ -64,7 +65,7 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
/*
* Hardware initial phase limited to [-0.5:1.5].
* Since the max hardware scale factor is 3.0, we
- * should never actually excdeed 1.0 here.
+ * should never actually exceed 1.0 here.
*/
WARN_ON(phase < -0x8000 || phase > 0x18000);
@@ -76,28 +77,60 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define TGL_MAX_SRC_W 5120
-#define TGL_MAX_SRC_H 8192
-#define TGL_MAX_DST_W 8192
-#define TGL_MAX_DST_H 8192
-#define MTL_MAX_SRC_W 4096
-#define MTL_MAX_SRC_H 8192
-#define MTL_MAX_DST_W 8192
-#define MTL_MAX_DST_H 8192
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
+static void skl_scaler_min_src_size(const struct drm_format_info *format,
+ u64 modifier, int *min_w, int *min_h)
+{
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ *min_w = 16;
+ *min_h = 16;
+ } else {
+ *min_w = 8;
+ *min_h = 8;
+ }
+}
+
+static void skl_scaler_max_src_size(struct intel_crtc *crtc,
+ int *max_w, int *max_h)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (DISPLAY_VER(display) >= 14) {
+ *max_w = 4096;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) >= 12) {
+ *max_w = 5120;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) == 11) {
+ *max_w = 5120;
+ *max_h = 4096;
+ } else {
+ *max_w = 4096;
+ *max_h = 4096;
+ }
+}
+
+static void skl_scaler_min_dst_size(int *min_w, int *min_h)
+{
+ *min_w = 8;
+ *min_h = 8;
+}
+
+static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
+ int *max_w, int *max_h)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (DISPLAY_VER(display) >= 12) {
+ *max_w = 8192;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) == 11) {
+ *max_w = 5120;
+ *max_h = 4096;
+ } else {
+ *max_w = 4096;
+ *max_h = 4096;
+ }
+}
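/*
 * Minimal usage sketch (mirroring skl_update_scaler() below; assumed,
 * not additional patch content):
 *
 *	int min_src_w, min_src_h, max_src_w, max_src_h;
 *
 *	skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
 *	skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
 *
 * e.g. a display version 12/13 (TGL-class) pipe gets an 8x8..5120x8192
 * source window, bumped to a 16x16 minimum for YUV semi-planar formats.
 */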
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
@@ -134,7 +167,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
drm_dbg_kms(display->drm,
- "Pipe/Plane scaling not supported with IF-ID mode\n");
+ "[CRTC:%d:%s] scaling not supported with IF-ID mode\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -154,8 +188,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scalers[*scaler_id].in_use = false;
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: "
+ "[CRTC:%d:%s] scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
@@ -163,39 +198,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
- (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(display->drm,
- "Planar YUV: src dimensions not met\n");
- return -EINVAL;
- }
+ skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
+ skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
- min_src_w = SKL_MIN_SRC_W;
- min_src_h = SKL_MIN_SRC_H;
- min_dst_w = SKL_MIN_DST_W;
- min_dst_h = SKL_MIN_DST_H;
-
- if (DISPLAY_VER(display) < 11) {
- max_src_w = SKL_MAX_SRC_W;
- max_src_h = SKL_MAX_SRC_H;
- max_dst_w = SKL_MAX_DST_W;
- max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(display) < 12) {
- max_src_w = ICL_MAX_SRC_W;
- max_src_h = ICL_MAX_SRC_H;
- max_dst_w = ICL_MAX_DST_W;
- max_dst_h = ICL_MAX_DST_H;
- } else if (DISPLAY_VER(display) < 14) {
- max_src_w = TGL_MAX_SRC_W;
- max_src_h = TGL_MAX_SRC_H;
- max_dst_w = TGL_MAX_DST_W;
- max_dst_h = TGL_MAX_DST_H;
- } else {
- max_src_w = MTL_MAX_SRC_W;
- max_src_h = MTL_MAX_SRC_H;
- max_dst_w = MTL_MAX_DST_W;
- max_dst_h = MTL_MAX_DST_H;
- }
+ skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
+ skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);
/* range checks */
if (src_w < min_src_w || src_h < min_src_h ||
@@ -203,8 +210,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "[CRTC:%d:%s] scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, src_w, src_h,
dst_w, dst_h);
return -EINVAL;
@@ -220,16 +228,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: pipe src size %ux%u "
+ "[CRTC:%d:%s] scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
@@ -309,15 +319,57 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
return -1;
}
-static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+static void
+calculate_max_scale(struct intel_crtc *crtc,
+ bool is_yuv_semiplanar,
+ int scaler_id,
+ int *max_hscale, int *max_vscale)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(display) >= 14) {
+ /*
+ * On versions 14 and up, only the first
+ * scaler supports a vertical scaling factor
+ * of more than 1.0, while a horizontal
+ * scaling factor of 3.0 is supported.
+ */
+ *max_hscale = 0x30000 - 1;
+
+ if (scaler_id == 0)
+ *max_vscale = 0x30000 - 1;
+ else
+ *max_vscale = 0x10000;
+ } else if (DISPLAY_VER(display) >= 10 || !is_yuv_semiplanar) {
+ *max_hscale = 0x30000 - 1;
+ *max_vscale = 0x30000 - 1;
+ } else {
+ *max_hscale = 0x20000 - 1;
+ *max_vscale = 0x20000 - 1;
+ }
+}
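/*
 * Note on the numbers above (no new behaviour implied): the scale factors
 * are 16.16 fixed point, i.e. 0x10000 == 1.0, so
 *
 *	max_hscale = 0x30000 - 1;	up to just under a 3.0 downscale
 *	max_vscale = 0x10000;		no vertical downscaling at all
 *
 * which is how display version 14+ restricts vertical downscaling to
 * scaler 0 only.
 */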
+
+static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 mode;
+ int hscale = 0;
+ int vscale = 0;
if (*scaler_id < 0)
*scaler_id = intel_allocate_scaler(scaler_state, crtc);
@@ -366,45 +418,15 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
mode = SKL_PS_SCALER_MODE_DYN;
}
- /*
- * FIXME: we should also check the scaler factors for pfit, so
- * this shouldn't be tied directly to planes.
- */
if (plane_state && plane_state->hw.fb) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_rect *src = &plane_state->uapi.src;
const struct drm_rect *dst = &plane_state->uapi.dst;
- int hscale, vscale, max_vscale, max_hscale;
-
- /*
- * FIXME: When two scalers are needed, but only one of
- * them needs to downscale, we should make sure that
- * the one that needs downscaling support is assigned
- * as the first scaler, so we don't reject downscaling
- * unnecessarily.
- */
+ int max_hscale, max_vscale;
- if (DISPLAY_VER(display) >= 14) {
- /*
- * On versions 14 and up, only the first
- * scaler supports a vertical scaling factor
- * of more than 1.0, while a horizontal
- * scaling factor of 3.0 is supported.
- */
- max_hscale = 0x30000 - 1;
- if (*scaler_id == 0)
- max_vscale = 0x30000 - 1;
- else
- max_vscale = 0x10000;
-
- } else if (DISPLAY_VER(display) >= 10 ||
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
- max_hscale = 0x30000 - 1;
- max_vscale = 0x30000 - 1;
- } else {
- max_hscale = 0x20000 - 1;
- max_vscale = 0x20000 - 1;
- }
+ calculate_max_scale(crtc,
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier),
+ *scaler_id, &max_hscale, &max_vscale);
/*
* FIXME: We should change the if-else block above to
@@ -417,8 +439,8 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (hscale < 0 || vscale < 0) {
drm_dbg_kms(display->drm,
- "Scaler %d doesn't support required plane scaling\n",
- *scaler_id);
+ "[CRTC:%d:%s] scaler %d doesn't support required plane scaling\n",
+ crtc->base.base.id, crtc->base.name, *scaler_id);
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dst, false);
@@ -426,7 +448,48 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
}
}
- drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
+ if (crtc_state->pch_pfit.enabled) {
+ struct drm_rect src;
+ int max_hscale, max_vscale;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ calculate_max_scale(crtc, 0, *scaler_id,
+ &max_hscale, &max_vscale);
+
+ /*
+ * When configured for Pipe YUV 420 encoding for port output,
+ * limit downscaling to less than 1.5 (source/destination) in
+ * the horizontal direction and 1.0 in the vertical direction.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ max_hscale = 0x18000 - 1;
+ max_vscale = 0x10000;
+ }
+
+ hscale = drm_rect_calc_hscale(&src, &crtc_state->pch_pfit.dst,
+ 0, max_hscale);
+ vscale = drm_rect_calc_vscale(&src, &crtc_state->pch_pfit.dst,
+ 0, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(display->drm,
+ "Scaler %d doesn't support required pipe scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", &src, true);
+ drm_rect_debug_print("dst: ", &crtc_state->pch_pfit.dst, false);
+
+ return -EINVAL;
+ }
+ }
+
+ scaler_state->scalers[*scaler_id].hscale = hscale;
+ scaler_state->scalers[*scaler_id].vscale = vscale;
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] attached scaler id %u.%u to %s:%d\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
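/*
 * Worked example for the YCbCr420 pipe-scaler limit above (illustrative
 * numbers): fitting a 3840-wide pipe source into a 2560-wide pfit window
 * is a 1.5 horizontal downscale, exactly 0x18000 in 16.16 fixed point,
 * which already exceeds the 0x18000 - 1 limit and is rejected; scaling
 * 3840 down to 2600 (~1.48) passes, while any vertical downscale at all
 * fails the 0x10000 (1.0) cap.
 */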
@@ -441,7 +504,7 @@ static int setup_crtc_scaler(struct intel_atomic_state *state,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- return intel_atomic_setup_scaler(scaler_state,
+ return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "CRTC", crtc->base.base.id,
NULL, &scaler_state->scaler_id);
@@ -476,7 +539,7 @@ static int setup_plane_scaler(struct intel_atomic_state *state,
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
- return intel_atomic_setup_scaler(scaler_state,
+ return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "PLANE", plane->base.base.id,
plane_state, &plane_state->scaler_id);
@@ -526,7 +589,8 @@ int intel_atomic_setup_scalers(struct intel_atomic_state *state,
/* fail if required scalers > available scalers */
if (num_scalers_need > crtc->num_scalers) {
drm_dbg_kms(display->drm,
- "Too many scaling requests %d > %d\n",
+ "[CRTC:%d:%s] too many scaling requests %d > %d\n",
+ crtc->base.base.id, crtc->base.name,
num_scalers_need, crtc->num_scalers);
return -EINVAL;
}
@@ -573,31 +637,31 @@ static u16 glk_nearest_filter_coef(int t)
* The letter represents the filter tap (D is the center tap) and the number
* represents the coefficient set for a phase (0-16).
*
- * +------------+------------------------+------------------------+
- * |Index value | Data value coeffient 1 | Data value coeffient 2 |
- * +------------+------------------------+------------------------+
- * | 00h | B0 | A0 |
- * +------------+------------------------+------------------------+
- * | 01h | D0 | C0 |
- * +------------+------------------------+------------------------+
- * | 02h | F0 | E0 |
- * +------------+------------------------+------------------------+
- * | 03h | A1 | G0 |
- * +------------+------------------------+------------------------+
- * | 04h | C1 | B1 |
- * +------------+------------------------+------------------------+
- * | ... | ... | ... |
- * +------------+------------------------+------------------------+
- * | 38h | B16 | A16 |
- * +------------+------------------------+------------------------+
- * | 39h | D16 | C16 |
- * +------------+------------------------+------------------------+
- * | 3Ah | F16 | C16 |
- * +------------+------------------------+------------------------+
- * | 3Bh | Reserved | G16 |
- * +------------+------------------------+------------------------+
+ * +------------+--------------------------+--------------------------+
+ * |Index value | Data value coefficient 1 | Data value coefficient 2 |
+ * +------------+--------------------------+--------------------------+
+ * | 00h | B0 | A0 |
+ * +------------+--------------------------+--------------------------+
+ * | 01h | D0 | C0 |
+ * +------------+--------------------------+--------------------------+
+ * | 02h | F0 | E0 |
+ * +------------+--------------------------+--------------------------+
+ * | 03h | A1 | G0 |
+ * +------------+--------------------------+--------------------------+
+ * | 04h | C1 | B1 |
+ * +------------+--------------------------+--------------------------+
+ * | ... | ... | ... |
+ * +------------+--------------------------+--------------------------+
+ * | 38h | B16 | A16 |
+ * +------------+--------------------------+--------------------------+
+ * | 39h | D16 | C16 |
+ * +------------+--------------------------+--------------------------+
+ * | 3Ah | F16 | C16 |
+ * +------------+--------------------------+--------------------------+
+ * | 3Bh | Reserved | G16 |
+ * +------------+--------------------------+--------------------------+
*
- * To enable nearest-neighbor scaling: program scaler coefficents with
+ * To enable nearest-neighbor scaling: program scaler coefficients with
* the center tap (Dxx) values set to 1 and all other values set to 0 as per
* SCALER_COEFFICIENT_FORMAT
*
@@ -695,6 +759,8 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
+
skl_scaler_setup_filter(display, pipe, id, 0,
crtc_state->hw.scaling_filter);
@@ -759,6 +825,9 @@ skl_program_plane_scaler(struct intel_plane *plane,
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ trace_intel_plane_scaler_update_arm(plane, scaler_id,
+ crtc_x, crtc_y, crtc_w, crtc_h);
+
skl_scaler_setup_filter(display, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
@@ -777,6 +846,8 @@ static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
struct intel_display *display = to_intel_display(crtc);
+ trace_intel_scaler_disable_arm(crtc, id);
+
intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ff9764cac1e7..ba5db553c374 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
@@ -516,6 +512,79 @@ skl_plane_max_stride(struct intel_plane *plane,
max_pixels, max_bytes);
}
+static bool tgl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return true;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ return false;
+ default:
+ return false;
+ }
+}
+
+static bool icl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ /*
+ * FIXME: Async on Linear buffer is supported on ICL
+ * but with additional alignment and fbc restrictions
+ * need to be taken care of.
+ */
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool skl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return true;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /*
+ * Display WA #0731: skl
+ * WaDisableRCWithAsyncFlip: skl
+ * "When render decompression is enabled, hardware
+ * internally converts the Async flips to Sync flips."
+ *
+ * Display WA #1159: glk
+ * "Async flip with render compression may result in
+ * intermittent underrun corruption."
+ */
+ return false;
+ default:
+ return false;
+ }
+}
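/*
 * Sketch of the assumed consumer of these hooks; the series uses
 * intel_plane_can_async_flip() (see the ADL-P alignment check further
 * down), whose body is not in this excerpt, so this is an illustration
 * only:
 *
 *	bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
 *	{
 *		return plane->can_async_flip && plane->can_async_flip(modifier);
 *	}
 *
 * i.e. 4-tile render-compressed buffers may async-flip on TGL+, while
 * media-compressed (MC_CCS) and clear-colour (CCS_CC) modifiers never do.
 */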
+
static u32 tgl_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
@@ -528,28 +597,30 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return mult * 4 * 1024;
+ /*
+ * FIXME ADL sees GGTT/DMAR faults with async
+ * flips unless we align to 16k at least.
+ * Figure out what's going on here...
+ */
+ if (IS_ALDERLAKE_P(i915) &&
+ intel_plane_can_async_flip(plane, fb->modifier))
+ return mult * 16 * 1024;
+
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_4_TILED:
- /*
- * FIXME ADL sees GGTT/DMAR faults with async
- * flips unless we align to 16k at least.
- * Figure out what's going on here...
- */
- if (IS_ALDERLAKE_P(i915) && HAS_ASYNC_FLIPS(i915))
- return mult * 16 * 1024;
return mult * 4 * 1024;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
- case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
- case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_BMG_CCS:
case I915_FORMAT_MOD_4_TILED_LNL_CCS:
/*
@@ -1024,7 +1095,7 @@ static u32 skl_plane_ctl_rotate(unsigned int rotate)
break;
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
- * while i915 HW rotation is clockwise, thats why this swapping.
+ * while i915 HW rotation is clockwise, that's why this swapping.
*/
case DRM_MODE_ROTATE_90:
return PLANE_CTL_ROTATE_270;
@@ -1229,8 +1300,8 @@ static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
return plane_surf;
}
-static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
- int color_plane)
+u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -2540,59 +2611,74 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
- /* Wa_22011186057 */
- if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
- return false;
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_1 || plane_id == PLANE_2);
+}
- if (DISPLAY_VER(i915) >= 11)
- return true;
+static u8 skl_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf;
- if (IS_GEMINILAKE(i915))
- return pipe != PIPE_C;
+ if (skl_plane_has_rc_ccs(i915, pipe, plane_id))
+ caps |= INTEL_PLANE_CAP_CCS_RC;
- return pipe != PIPE_C &&
- (plane_id == PLANE_1 || plane_id == PLANE_2);
+ return caps;
+}
+
+static bool glk_plane_has_rc_ccs(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ return pipe != PIPE_C;
+}
+
+static u8 glk_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf;
+
+ if (glk_plane_has_rc_ccs(i915, pipe))
+ caps |= INTEL_PLANE_CAP_CCS_RC;
+
+ return caps;
+}
+
+static u8 icl_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ return INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf |
+ INTEL_PLANE_CAP_CCS_RC;
}
static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915,
enum plane_id plane_id)
{
- if (DISPLAY_VER(i915) < 12)
- return false;
-
/* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
- (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
- return false;
-
- /* Wa_22011186057 */
- if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
return false;
return plane_id < PLANE_6;
}
-static u8 skl_get_plane_caps(struct drm_i915_private *i915,
- enum pipe pipe, enum plane_id plane_id)
+static u8 tgl_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
{
struct intel_display *display = &i915->display;
- u8 caps = INTEL_PLANE_CAP_TILING_X;
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_CCS_RC |
+ INTEL_PLANE_CAP_CCS_RC_CC;
- if (DISPLAY_VER(display) < 13 || display->platform.alderlake_p)
- caps |= INTEL_PLANE_CAP_TILING_Y;
- if (DISPLAY_VER(display) < 12)
- caps |= INTEL_PLANE_CAP_TILING_Yf;
if (HAS_4TILE(display))
caps |= INTEL_PLANE_CAP_TILING_4;
-
- if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
- return caps;
-
- if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
- caps |= INTEL_PLANE_CAP_CCS_RC;
- if (DISPLAY_VER(display) >= 12)
- caps |= INTEL_PLANE_CAP_CCS_RC_CC;
- }
+ else
+ caps |= INTEL_PLANE_CAP_TILING_Y;
if (tgl_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
@@ -2616,6 +2702,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
const u32 *formats;
int num_formats;
int ret;
+ u8 caps;
plane = intel_plane_alloc();
if (IS_ERR(plane))
@@ -2671,11 +2758,18 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
- if (plane_id == PLANE_1) {
+ if (HAS_ASYNC_FLIPS(dev_priv) && plane_id == PLANE_1) {
plane->need_async_flip_toggle_wa = IS_DISPLAY_VER(dev_priv, 9, 10);
plane->async_flip = skl_plane_async_flip;
plane->enable_flip_done = skl_plane_enable_flip_done;
plane->disable_flip_done = skl_plane_disable_flip_done;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ plane->can_async_flip = tgl_plane_can_async_flip;
+ else if (DISPLAY_VER(dev_priv) == 11)
+ plane->can_async_flip = icl_plane_can_async_flip;
+ else
+ plane->can_async_flip = skl_plane_can_async_flip;
}
if (DISPLAY_VER(dev_priv) >= 11)
@@ -2700,8 +2794,22 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- modifiers = intel_fb_plane_get_modifiers(dev_priv,
- skl_get_plane_caps(dev_priv, pipe, plane_id));
+ if (DISPLAY_VER(dev_priv) >= 12)
+ caps = tgl_plane_caps(dev_priv, pipe, plane_id);
+ else if (DISPLAY_VER(dev_priv) == 11)
+ caps = icl_plane_caps(dev_priv, pipe, plane_id);
+ else if (DISPLAY_VER(dev_priv) == 10)
+ caps = glk_plane_caps(dev_priv, pipe, plane_id);
+ else
+ caps = skl_plane_caps(dev_priv, pipe, plane_id);
+
+ /* FIXME: xe has problems with AUX */
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(dev_priv))
+ caps &= ~(INTEL_PLANE_CAP_CCS_RC |
+ INTEL_PLANE_CAP_CCS_RC_CC |
+ INTEL_PLANE_CAP_CCS_MC);
+
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, caps);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
@@ -2885,7 +2993,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
- * while i915 HW rotation is clockwise, thats why this swapping.
+ * while i915 HW rotation is clockwise, that's why this swapping.
*/
switch (val & PLANE_CTL_ROTATE_MASK) {
case PLANE_CTL_ROTATE_0:
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
index 541489479135..18b41d13f0bd 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -37,4 +37,7 @@ bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
u8 icl_hdr_plane_mask(void);
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
+ int color_plane);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index f4458d1185b3..45fe4aaeb450 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -584,7 +584,7 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
/*
* Per plane DDB entry can in a really worst case be on multiple slices
- * but single entry is anyway contigious.
+ * but single entry is anyway contiguous.
*/
while (start_slice <= end_slice) {
slice_mask |= BIT(start_slice);
@@ -2292,6 +2292,87 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
+static int
+cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state)) {
+ drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
+ return 1;
+ }
+
+ return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
+ 2 * cdclk_state->logical.cdclk));
+}
+
+static int
+dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
+ crtc_state->hw.adjusted_mode.clock);
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ int chroma_downscaling_factor =
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
+ u32 dsc_prefill_latency = 0;
+
+ if (!crtc_state->dsc.compression_enable || !num_scaler_users)
+ return dsc_prefill_latency;
+
+ dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
+
+ for (int i = 0; i < num_scaler_users; i++) {
+ u64 hscale_k, vscale_k;
+
+ hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
+ vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
+ dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
+ 1000000);
+ }
+
+ dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency);
+}
+
+static int
+scaler_prefill_latency(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ int scaler_prefill_latency = 0;
+ int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
+ crtc_state->hw.adjusted_mode.clock);
+
+ if (!num_scaler_users)
+ return scaler_prefill_latency;
+
+ scaler_prefill_latency = 4 * linetime;
+
+ if (num_scaler_users > 1) {
+ u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
+ u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
+ int chroma_downscaling_factor =
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
+ int latency;
+
+ latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
+ chroma_downscaling_factor), 1000000);
+ scaler_prefill_latency += latency;
+ }
+
+ scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency);
+}
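/*
 * Back-of-the-envelope example (illustrative numbers): a 3840x2160 mode
 * with htotal = 4400 and a 594000 kHz pixel clock has
 *
 *	linetime = DIV_ROUND_UP(1000 * 4400, 594000) = 8 us
 *
 * so a single enabled scaler adds 4 * 8 = 32 us of prefill, roughly 5
 * extra scanlines once converted back via intel_usecs_to_scanlines();
 * DSC, chroma downscaling and additional scalers scale that figure up
 * further. These are the lines skl_is_vblank_too_short() now adds to the
 * wm0 budget below.
 */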
+
static bool
skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
int wm0_lines, int latency)
@@ -2299,9 +2380,10 @@ skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- /* FIXME missing scaler and DSC pre-fill time */
return crtc_state->framestart_delay +
intel_usecs_to_scanlines(adjusted_mode, latency) +
+ scaler_prefill_latency(crtc_state) +
+ dsc_prefill_latency(crtc_state) +
wm0_lines >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
}
@@ -3204,7 +3286,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* WaWmMemoryReadLatency
*
* punit doesn't take into account the read latency so we need
- * to add proper adjustement to each valid level we retrieve
+ * to add proper adjustment to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d49e9b3c7627..0333c4d9b703 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -59,7 +59,7 @@ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
8 * 100), lane_count);
}
-/* return pixels equvalent to txbyteclkhs */
+/* return pixels equivalent to txbyteclkhs */
static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
u16 burst_mode_ratio)
{
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 59a50647f2c3..ac69eaece0fd 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -459,7 +459,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
/*
* rx divider value needs to be updated in the
- * two differnt bit fields in the register hence splitting the
+ * two different bit fields in the register hence splitting the
* rx divider value accordingly
*/
rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index c0543c35cd6a..ab1af978911b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -238,7 +238,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
*
* However, if we cannot reset an engine by itself, we cannot
* cleanup a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(i915)))
@@ -1589,7 +1589,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
*
* However, if we cannot reset an engine by itself, we cannot
* cleanup a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(ctx->i915)))
@@ -2328,7 +2328,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
/*
* One for the xarray and one for the caller. We need to grab
- * the reference *prior* to making the ctx visble to userspace
+ * the reference *prior* to making the ctx visible to userspace
* in gem_context_register(), as at any point after that
* userspace can try to race us with another thread destroying
* the context under our feet.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index b6d97da63d1f..67ac2586a0f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -245,9 +245,9 @@ struct i915_gem_context {
* Execbuf uses the I915_EXEC_RING_MASK as an index into this
* array to select which HW context + engine to execute on. For
* the default array, the user_ring_map[] is used to translate
- * the legacy uABI onto the approprate index (e.g. both
+ * the legacy uABI onto the appropriate index (e.g. both
* I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
- * context, and I915_EXEC_BSD is weird). For a use defined
+ * context, and I915_EXEC_BSD is weird). For a user defined
* array, execbuf uses I915_EXEC_RING_MASK as a plain index.
*
* User defined by I915_CONTEXT_PARAM_ENGINE (when the
@@ -276,7 +276,7 @@ struct i915_gem_context {
* @vm: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
- * complete seperation of one client from all others.
+ * complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3770828f2eaf..ee55caca67a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -276,7 +276,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* For objects created by userspace through GEM_CREATE with pat_index
* set by set_pat extension, simply return 0 here without touching
* the cache setting, because such objects should have an immutable
- * cache setting by desgin and always managed by userspace.
+ * cache setting by design and always managed by userspace.
*/
if (i915_gem_object_has_cache_level(obj, cache_level))
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f151640c1d13..c8107502190d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -303,7 +303,7 @@ struct i915_execbuffer {
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
/**
- * Indicate either the size of the hastable used to resolve
+ * Indicate either the size of the hashtable used to resolve
* relocation handles, or if negative that we are using a direct
* index into the execobj[].
*/
@@ -2543,7 +2543,7 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
/*
* Error path, cannot use intel_context_timeline_lock as
- * that is user interruptable and this clean up step
+ * that is user interruptible and this clean up step
* must be done.
*/
mutex_lock(&ce->timeline->mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index b09b74a2448b..636768d0f57e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -82,7 +82,7 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
/*
* Anything smaller than the min_page_size can't be freely inserted into
- * the GTT, due to alignemnt restrictions. For such special objects,
+ * the GTT, due to alignment restrictions. For such special objects,
* make sure we force memcpy based suspend-resume. In the future we can
* revisit this, either by allowing special mis-aligned objects in the
* migration path, or by mapping all of LMEM upfront using cheap 1G
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 9117e9422844..aec41f0f098f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -25,7 +25,7 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Consider only shrinkable ojects. */
+ /* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
@@ -261,7 +261,7 @@ skip:
* i915_gem_shrink_all - Shrink buffer object caches completely
* @i915: i915 device
*
- * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding
* requests to also be able to release backing storage for active objects.
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index d9eb84c1d2f1..5ac23ff3feff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -39,7 +39,7 @@
* Since neither of this applies for new tiling layouts on modern platforms like
* W, Ys and Yf tiling GEM only allows object tiling to be set to X or Y tiled.
* Anything else can be handled in userspace entirely without the kernel's
- * invovlement.
+ * involvement.
*/
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 10d8673641f7..1f4814968868 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -994,7 +994,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
- * portion. And likewise when we try to find space for a mappble
+ * portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 041dab543b78..2f6b33edb9c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -603,7 +603,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
* sequence, where at the end we can do the move for real.
*
* The special case here is when the dst_mem is TTM_PL_SYSTEM,
- * which doens't require any kind of move, so it should be safe
+ * which doesn't require any kind of move, so it should be safe
* to skip all the below and call ttm_bo_move_null() here, where
* the caller in __i915_ttm_get_pages() will take care of the
* rest, since we should have a valid ttm_tt.
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 84d41e6ccf05..bd08605a1611 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1781,7 +1781,7 @@ static int igt_tmpfs_fallback(void *arg)
/*
* Make sure that we don't burst into a ball of flames upon falling back
- * to tmpfs, which we rely on if on the off-chance we encouter a failure
+ * to tmpfs, which we rely on if on the off-chance we encounter a failure
* when setting up gemfs.
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 99a9ade73956..804f74084bd4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1342,7 +1342,7 @@ static int igt_mmap_migrate(void *arg)
}
/*
- * Allocate in the mappable portion, should be no suprises here.
+ * Allocate in the mappable portion, should be no surprises here.
*/
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 4904d0f4162c..8116fd5987e2 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -179,7 +179,7 @@ u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
return __gen2_emit_breadcrumb(rq, cs, 8, 8);
}
-/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4d30a86016f2..ec136eb12d48 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -308,7 +308,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
/*
* There is a discrepancy here between the size reported
* by the register and the size of the context layout
- * in the docs. Both are described as authorative!
+ * in the docs. Both are described as authoritative!
*
* The discrepancy is on the order of a few cachelines,
* but the total is under one page (4k), which is our
@@ -845,7 +845,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
* Note that we have a catch-22 situation where we need to be able to access
* the blitter forcewake domain to read the engine fuses, but at the same time
* we need to know which engines are available on the system to know which
- * forcewake domains are present. We solve this by intializing the forcewake
+ * forcewake domains are present. We solve this by initializing the forcewake
* domains based on the full engine mask in the platform capabilities before
* calling this function and pruning the domains for fused-off engines
* afterwards.
@@ -1411,7 +1411,7 @@ create_ggtt_bind_context(struct intel_engine_cs *engine)
/*
* MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
- * bind requets at a time so get a bigger ring.
+ * bind requests at a time so get a bigger ring.
*/
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
I915_GEM_HWS_GGTT_BIND_ADDR,
@@ -1533,7 +1533,7 @@ int intel_engines_init(struct intel_gt *gt)
/**
* intel_engine_cleanup_common - cleans up the engine state created by
- * the common initiailizers.
+ * the common initializers.
* @engine: Engine to cleanup.
*
* This cleans up everything created by the common helpers.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index fe1f85e5dda3..155b6255a63e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -237,7 +237,7 @@ struct intel_engine_execlists {
*/
struct i915_request * const *active;
/**
- * @inflight: the set of contexts submitted and acknowleged by HW
+ * @inflight: the set of contexts submitted and acknowledged by HW
*
* The set of inflight contexts is managed by reading CS events
* from the HW. On a context-switch event (not preemption), we
@@ -260,7 +260,7 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @virtual: Queue of requets on a virtual engine, sorted by priority.
+ * @virtual: Queue of requests on a virtual engine, sorted by priority.
* Each RB entry is a struct i915_priolist containing a list of requests
* of the same priority.
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 1240d44eeb85..75e802e10be2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -480,7 +480,7 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled. Same wil be the case for GuC interrupts.
+ * is enabled/disabled. Same will be the case for GuC interrupts.
*/
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier);
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index b8912bd6c08e..aab20d6466f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -239,7 +239,7 @@ static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
* to remain in multicast mode for reads. There's no real
* downside to this, so we'll just go ahead and do so on all
* platforms; we'll only clear the multicast bit from the mask
- * when exlicitly doing a write operation.
+ * when explicitly doing a write operation.
*/
if (rw_flag == FW_REG_WRITE)
mcr_mask |= GEN11_MCR_MULTICAST;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 6f7af4077135..aff5aca591e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -304,7 +304,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
struct intel_context *ce;
/*
- * We randomly distribute contexts across the engines upon constrction,
+ * We randomly distribute contexts across the engines upon construction,
* as they all share the same pinned vm, and so in order to allow
* multiple blits to run in parallel, we must construct each blit
* to use a different range of the vm for its GTT. This has to be
@@ -646,7 +646,7 @@ calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
* When CHUNK_SZ is passed all the pages upto CHUNK_SZ
* will be taken for the blt. in Flat-ccs supported
* platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
+ * for main memory hence limit it to the required size
* for main memory
*/
return min_t(u64, bytes_to_cpy, CHUNK_SZ);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index d791d63d49b4..cf41d325712e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -675,7 +675,7 @@ void intel_mocs_init(struct intel_gt *gt)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
/*
- * Initialize the L3CC table as part of mocs initalization to make
+ * Initialize the L3CC table as part of mocs initialization to make
* sure the LNCFCMOCSx registers are programmed for the subsequent
* memory transactions including guc transactions
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index aae5a081cb53..5a625518d1a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1098,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
dma_fence_put(fence);
- /* Restart iteration after droping lock */
+ /* Restart iteration after dropping lock */
spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 458e29d89978..6e9977b2d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -242,7 +242,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
/*
* In case of resets fails because engine resumes from
* incorrect RING_HEAD and then GPU may be then fed
- * to invalid instrcutions, which may lead to unrecoverable
+ * to invalid instructions, which may lead to unrecoverable
* hang. So at first write doesn't succeed then try again.
*/
ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 6507fa3f6d1e..5135b90a2a40 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -40,7 +40,7 @@ enum {
/**
* struct intel_rps_freq_caps - rps freq capabilities
* @rp0_freq: non-overclocked max frequency
- * @rp1_freq: "less than" RP0 power/freqency
+ * @rp1_freq: "less than" RP0 power/frequency
* @min_freq: aka RPn, minimum frequency
*
* Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq()
@@ -90,7 +90,7 @@ struct intel_rps {
u8 boost_freq; /* Frequency to request when wait boosting */
u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/freqency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
index 8c1dbcbcbc4f..2945526d52d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_sa_media.c
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -27,7 +27,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
/*
* Standalone media shares the general MMIO space with the primary
- * GT. We'll re-use the primary GT's mapping.
+ * GT. We'll reuse the primary GT's mapping.
*/
uncore->regs = intel_uncore_regs(&i915->uncore);
if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index c8fadf58d836..e4538dd726c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -687,7 +687,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* According to documentation software must consider the configuration
* as 2x4x8 and hardware will translate this to 1x8x8.
*
- * Furthemore, even though SScount is three bits, maximum documented
+ * Furthermore, even though SScount is three bits, maximum documented
* value for it is four. From this some rules/restrictions follow:
*
* 1.
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 570c91878189..3ea9b06de1be 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1318,7 +1318,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
* We'll do our default/implicit steering based on GSLICE (in the
* sliceid field) and DSS (in the subsliceid field). If we can
* find overlap between the valid MSLICE and/or LNCF values with
- * a suitable GSLICE, then we can just re-use the default value and
+ * a suitable GSLICE, then we can just reuse the default value and
* skip and explicit steering at runtime.
*
* We only need to look for overlap between GSLICE/MSLICE/LNCF to find
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 81c31396eceb..d7717de17ecc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -53,7 +53,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) /* that was quick! */
return 0;
- /* Wait until the HW has acknowleged the submission (or err) */
+ /* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9d3aeb237295..f057c16410e7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -548,7 +548,7 @@ static int igt_reset_fail_engine(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- /* Check that we can recover from engine-reset failues */
+ /* Check that we can recover from engine-reset failures */
if (!intel_has_reset_engine(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e17b8777d21d..22e750108c5f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -63,7 +63,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) /* that was quick! */
return 0;
- /* Wait until the HW has acknowleged the submission (or err) */
+ /* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 27b6d51ef145..908483ab0bc8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -222,7 +222,7 @@ int live_rc6_ctx_wa(void *arg)
i915_reset_engine_count(error, engine);
const u32 *res;
- /* Use a sacrifical context */
+ /* Use a sacrificial context */
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index c207a4fb03bf..78c03e6c0861 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -22,7 +22,7 @@
#include "selftests/igt_spinner.h"
#include "selftests/librapl.h"
-/* Try to isolate the impact of cstates from determing frequency response */
+/* Try to isolate the impact of cstates from determining frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
static void dummy_rps_work(struct work_struct *wrk)
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
index e7e96d7073c7..22f8dabed434 100644
--- a/drivers/gpu/drm/i915/gt/shaders/README
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -10,7 +10,7 @@ i915/gt/shaders/clear_kernel directory.
The generated .c files should never be modified directly. Instead, any modification
needs to be done on the on their respective ASM files and build instructions below
-needes to be followed.
+needs to be followed.
Building
========
@@ -24,7 +24,7 @@ on building.
Please make sure your Mesa tool is compiled with "-Dtools=intel" and
"-Ddri-drivers=i965", and run this script from IGT source root directory"
-The instructions bellow assume:
+The instructions below assume:
* IGT gpu tools source code is located on your home directory (~) as ~/igt
* Mesa source code is located on your home directory (~) as ~/mesa
and built under the ~/mesa/build directory
@@ -43,4 +43,4 @@ igt $ ./scripts/generate_clear_kernel.sh -g ivb \
~/igt/lib/i915/shaders/clear_kernel/hsw.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g hsw \
- -m ~/mesa/build/src/intel/tools/i965_asm \ No newline at end of file
+ -m ~/mesa/build/src/intel/tools/i965_asm
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
index 5fdf384bb621..6c0c89daf96c 100644
--- a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
- * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
index 97c7ac9e3854..27c28e63d6cc 100644
--- a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
- * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 1fc0c17b1230..803c0379d97d 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -81,7 +81,7 @@ struct guc_debug_capture_list {
*
* intel_guc_capture module uses these structures to maintain static
* tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
@@ -200,7 +200,7 @@ struct intel_guc_state_capture {
* dynamically allocate new nodes when receiving the G2H notification
* because the event handlers for all G2H event-processing is called
* by the ct processing worker queue and when that queue is being
- * processed, there is no absoluate guarantee that we are not in the
+ * processed, there is no absolute guarantee that we are not in the
* midst of a GT reset operation (which doesn't allow allocations).
*/
struct list_head cachelist;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5949ff0b0161..9df80c325fc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -690,7 +690,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* H2G MMIO command completes.
*
* Don't abort on a failure code from the GuC. Keep going and do the
- * clean up in santize() and re-initialisation on resume and hopefully
+ * clean up in sanitize() and re-initialisation on resume and hopefully
* the error here won't be problematic.
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 57b903132776..053780f562c1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -295,7 +295,7 @@ struct intel_guc {
*/
struct work_struct dead_guc_worker;
/**
- * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrance
+ * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrence
* used to prevent a fundamentally broken system from continuously
* reloading the GuC.
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 4ce6e2332a63..eded00f0c7e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -408,7 +408,7 @@ enum guc_capture_type {
GUC_CAPTURE_LIST_TYPE_MAX,
};
-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
enum {
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 706fffca698b..1a0e1a412fdb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -357,21 +357,29 @@ static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
-static void slpc_shared_data_reset(struct slpc_shared_data *data)
+static void slpc_shared_data_reset(struct intel_guc_slpc *slpc)
{
- memset(data, 0, sizeof(struct slpc_shared_data));
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct slpc_shared_data *data = slpc->vaddr;
+ memset(data, 0, sizeof(struct slpc_shared_data));
data->header.size = sizeof(struct slpc_shared_data);
/* Enable only GTPERF task, disable others */
slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
SLPC_PARAM_TASK_DISABLE_GTPERF);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
- SLPC_PARAM_TASK_DISABLE_BALANCER);
+ /*
+ * Don't allow balancer related algorithms on platforms before
+ * Xe_LPG, where GuC started to restrict it to TDP limited scenarios.
+ */
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)) {
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
- SLPC_PARAM_TASK_DISABLE_DCC);
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+ }
}
/**
@@ -686,7 +694,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
GEM_BUG_ON(!slpc->vma);
- slpc_shared_data_reset(slpc->vaddr);
+ slpc_shared_data_reset(slpc);
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
@@ -791,6 +799,23 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
drm_printf(p, "\tGTPERF task active: %s\n",
str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tDCC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_DCC_TASK_ENABLED));
+ drm_printf(p, "\tDCC in: %s\n",
+ str_yes_no(slpc_tasks->status & SLPC_IN_DCC));
+ drm_printf(p, "\tBalancer enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_ENABLED));
+ drm_printf(p, "\tIBC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_IBC_TASK_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT active: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ACTIVE));
drm_printf(p, "\tMax freq: %u MHz\n",
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 12f1ba7ca9c1..3b1333a24a89 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1223,7 +1223,7 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
* determine validity of these values. Instead we read the values multiple times
* until they are consistent. In test runs, 3 attempts results in consistent
* values. The upper bound is set to 6 attempts and may need to be tuned as per
- * any new occurences.
+ * any new occurrences.
*/
static void __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
@@ -2995,7 +2995,7 @@ static int __guc_context_pin(struct intel_context *ce,
/*
* GuC context gets pinned in guc_request_alloc. See that function for
- * explaination of why.
+ * explanation of why.
*/
return lrc_pin(ce, engine, vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 5b8080ec5315..90ba1b0b4c9d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -512,7 +512,7 @@ static int __uc_init_hw(struct intel_uc *uc)
ERR_PTR(ret), attempts);
}
- /* Did we succeded or run out of retries? */
+ /* Did we succeed or run out of retries? */
if (ret)
goto err_log_capture;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 26fdc392fce6..83801c992488 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -64,7 +64,7 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
+ gt_err(gt, "Failed to boost heartbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6439c8e91a8d..f25ee2953baf 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1906,7 +1906,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
unsigned long start_offset = 0;
- /* get the start gm address of the batch buffer */
+ /* Get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
@@ -1921,15 +1921,16 @@ static int perform_bb_shadow(struct parser_exec_state *s)
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
- /* the start_offset stores the batch buffer's start gma's
- * offset relative to page boundary. so for non-privileged batch
+ /*
+ * The start_offset stores the batch buffer's start gma's
+ * offset relative to page boundary. So for non-privileged batch
* buffer, the shadowed gem object holds exactly the same page
- * layout as original gem object. This is for the convience of
+ * layout as original gem object. This is for the convenience of
* replacing the whole non-privilged batch buffer page to this
- * shadowed one in PPGTT at the same gma address. (this replacing
+ * shadowed one in PPGTT at the same gma address. (This replacing
* action is not implemented yet now, but may be necessary in
* future).
- * for prileged batch buffer, we just change start gma address to
+ * For privileged batch buffer, we just change start gma address to
* that of shadowed page.
*/
if (bb->ppgtt)
@@ -1976,7 +1977,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/*
* ip_va saves the virtual address of the shadow batch buffer, while
* ip_gma saves the graphics address of the original batch buffer.
- * As the shadow batch buffer is just a copy from the originial one,
+ * As the shadow batch buffer is just a copy from the original one,
* it should be right to use shadow batch buffer'va and original batch
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 95570cabdf27..c98dfcc3d0de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -97,7 +97,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
-static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+static const unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
{
/* EDID with 1024x768 as its resolution */
/*Header*/
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 9efc3ca0ce82..4f599af766b0 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -436,7 +436,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
dmabuf_obj_get(dmabuf_obj);
}
ret = 0;
- gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+ gvt_dbg_dpy("vgpu%d: reuse dmabuf_obj ref %d, id %d\n",
vgpu->id, kref_read(&dmabuf_obj->kref),
gfx_plane_info->dmabuf_id);
mutex_unlock(&vgpu->dmabuf_lock);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 0a357ca42db1..89147d33168c 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -298,7 +298,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
int byte_count = byte_left;
u32 reg_data = 0;
- /* Data can only be recevied if previous settings correct */
+ /* Data can only be received if previous settings correct */
if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 15cce973e1ae..f9f7ef131371 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -398,120 +398,3 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
-
-#define SPRITE_FORMAT_NUM (1 << 3)
-
-static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
- [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
- [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
- [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
- [0x4] = {DRM_FORMAT_AYUV, 32,
- "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
-};
-
-/**
- * intel_vgpu_decode_sprite_plane - Decode sprite plane
- * @vgpu: input vgpu
- * @plane: sprite plane to save decoded info
- * This function is called for decoding plane
- *
- * Returns:
- * 0 on success, non-zero if failed.
- */
-int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
- struct intel_vgpu_sprite_plane_format *plane)
-{
- u32 val, fmt;
- u32 color_order, yuv_order;
- int drm_format;
- int pipe;
-
- pipe = get_active_pipe(vgpu);
- if (pipe >= I915_MAX_PIPES)
- return -ENODEV;
-
- val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
- plane->enabled = !!(val & SPRITE_ENABLE);
- if (!plane->enabled)
- return -ENODEV;
-
- plane->tiled = !!(val & SPRITE_TILED);
- color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
- yuv_order = (val & SPRITE_YUV_ORDER_MASK) >>
- _SPRITE_YUV_ORDER_SHIFT;
-
- fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT;
- if (!sprite_pixel_formats[fmt].bpp) {
- gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
- return -EINVAL;
- }
- plane->hw_format = fmt;
- plane->bpp = sprite_pixel_formats[fmt].bpp;
- drm_format = sprite_pixel_formats[fmt].drm_format;
-
- /* Order of RGB values in an RGBxxx buffer may be ordered RGB or
- * BGR depending on the state of the color_order field
- */
- if (!color_order) {
- if (drm_format == DRM_FORMAT_XRGB2101010)
- drm_format = DRM_FORMAT_XBGR2101010;
- else if (drm_format == DRM_FORMAT_XRGB8888)
- drm_format = DRM_FORMAT_XBGR8888;
- }
-
- if (drm_format == DRM_FORMAT_YUV422) {
- switch (yuv_order) {
- case 0:
- drm_format = DRM_FORMAT_YUYV;
- break;
- case 1:
- drm_format = DRM_FORMAT_UYVY;
- break;
- case 2:
- drm_format = DRM_FORMAT_YVYU;
- break;
- case 3:
- drm_format = DRM_FORMAT_VYUY;
- break;
- default:
- /* yuv_order has only 2 bits */
- break;
- }
- }
-
- plane->drm_format = drm_format;
-
- plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!vgpu_gmadr_is_valid(vgpu, plane->base))
- return -EINVAL;
-
- plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
- if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
- plane->base);
- return -EINVAL;
- }
-
- plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
- _SPRITE_STRIDE_MASK;
-
- val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
- plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
- _SPRITE_SIZE_HEIGHT_SHIFT;
- plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
- _SPRITE_SIZE_WIDTH_SHIFT;
- plane->height += 1; /* raw height is one minus the real value */
- plane->width += 1; /* raw width is one minus the real value */
-
- val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
- plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
- plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
-
- val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
- plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
- _SPRITE_OFFSET_START_X_SHIFT;
- plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
- _SPRITE_OFFSET_START_Y_SHIFT;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index fa6503900c84..436d43c0087b 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -156,7 +156,5 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane);
-int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
- struct intel_vgpu_sprite_plane_format *plane);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 1bce1493b86f..2fa7ca19ba5d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,72 +71,6 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
return false;
}
-/* translate a guest gmadr to host gmadr */
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
-{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
- if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
- return -EACCES;
-
- if (vgpu_gmadr_is_aperture(vgpu, g_addr))
- *h_addr = vgpu_aperture_gmadr_base(vgpu)
- + (g_addr - vgpu_aperture_offset(vgpu));
- else
- *h_addr = vgpu_hidden_gmadr_base(vgpu)
- + (g_addr - vgpu_hidden_offset(vgpu));
- return 0;
-}
-
-/* translate a host gmadr to guest gmadr */
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
-{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
- if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
- return -EACCES;
-
- if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
- *g_addr = vgpu_aperture_gmadr_base(vgpu)
- + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
- else
- *g_addr = vgpu_hidden_gmadr_base(vgpu)
- + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
- return 0;
-}
-
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
- unsigned long *h_index)
-{
- u64 h_addr;
- int ret;
-
- ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
- &h_addr);
- if (ret)
- return ret;
-
- *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
- return 0;
-}
-
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
- unsigned long *g_index)
-{
- u64 g_addr;
- int ret;
-
- ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
- &g_addr);
- if (ret)
- return ret;
-
- *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
- return 0;
-}
-
#define gtt_type_is_entry(type) \
(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
&& type != GTT_TYPE_PPGTT_PTE_ENTRY \
@@ -1259,7 +1193,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
gvt_vdbg_mm("shadow 64K gtt entry\n");
/*
* The layout of 64K page is special, the page size is
- * controlled by uper PDE. To be simple, we always split
+ * controlled by upper PDE. To be simple, we always split
* 64K page to smaller 4K pages in shadow PT.
*/
return split_64KB_gtt_entry(vgpu, spt, index, &se);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2c95aeef4e41..01d890999f25 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -452,8 +452,10 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register.
- Explicitly seperate use for typed MMIO reg or real offset.*/
+/*
+ * Macros for easily accessing vGPU virtual/shadow register.
+ * Explicitly separate use for typed MMIO reg or real offset.
+ */
#define vgpu_vreg_t(vgpu, reg) \
(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
@@ -531,12 +533,6 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
gvt_gmadr_is_hidden(gvt, gmadr))
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
- unsigned long *h_index);
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
- unsigned long *g_index);
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary);
@@ -702,7 +698,7 @@ static inline void intel_gvt_mmio_set_cmd_write_patch(
* @offset: register offset
*
* Returns:
- * True if GPU commmand write to an MMIO should be patched
+ * True if GPU command write to an MMIO should be patched.
*/
static inline bool intel_gvt_mmio_is_cmd_write_patch(
struct intel_gvt *gvt, unsigned int offset)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 241cff0fc683..4efee6797873 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -689,11 +689,11 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
u32 new_rate = 0;
u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
- /* Calcuate pixel clock by (ls_clk * M / N) */
+ /* Calculate pixel clock by (ls_clk * M / N) */
pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
pixel_clk *= MSEC_PER_SEC;
- /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */
+ /* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
if (*old_rate != new_rate)
@@ -2001,7 +2001,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* vGPU reset, it's set on D0->D3 on PCI config write, and cleared after
* vGPU reset if in resuming.
* In S0ix exit, the device power state also transite from D3 to D0 as
- * S3 resume, but no vGPU reset (triggered by QEMU devic model). After
+ * S3 resume, but no vGPU reset (triggered by QEMU device model). After
* S0ix exit, all engines continue to work. However the d3_entered
* remains set which will break next vGPU reset logic (miss the expected
* PPGTT invalidation).
@@ -3119,23 +3119,6 @@ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
- * force-nopriv register
- *
- * @gvt: a GVT device
- * @offset: register offset
- *
- * Returns:
- * True if the register is in force-nonpriv whitelist;
- * False if outside;
- */
-bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
- unsigned int offset)
-{
- return in_whitelist(offset);
-}
-
-/**
* intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
* @vgpu: a vGPU
* @offset: register offset
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b27ff77bfb50..69830a5c49d3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -142,7 +142,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
int ret;
/*
- * We pin the pages one-by-one to avoid allocating a big arrary
+ * We pin the pages one-by-one to avoid allocating a big array
* on stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32ebacb078e8..3dc912aba80b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -96,9 +96,6 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
-bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
- unsigned int offset);
-
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 273db14fd5fc..2f7208843367 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -53,7 +53,7 @@ struct engine_mmio {
u32 value;
};
-/* Raw offset is appened to each line for convenience. */
+/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
@@ -576,8 +576,8 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
/**
* We are using raw mmio access wrapper to improve the
- * performace for batch mmio read/write, so we need
- * handle forcewake mannually.
+ * performance for batch mmio read/write, so we need to
+ * handle forcewake manually.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, engine);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 23f2cc397ec9..6e87c10bc454 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -77,7 +77,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
}
/*
- * when populating shadow ctx from guest, we should not overrride oa related
+ * When populating shadow ctx from guest, we should not override oa related
* registers, so that they will not be overlapped by guest oa configs. Thus
* made it possible to capture oa data from host for both host and guests.
*/
@@ -528,9 +528,10 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
- /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+ /*
+ * For privileged batch buffer and not wa_ctx, the bb_start_cmd_va
* is only updated into ring_scan_buffer, not real ring address
- * allocated in later copy_workload_to_ring_buffer. pls be noted
+ * allocated in later copy_workload_to_ring_buffer. Note that
* shadow_ring_buffer_va is now pointed to real ring buffer va
* in copy_workload_to_ring_buffer.
*/
@@ -546,7 +547,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
* here, rather than switch to shadow bb's gma
* address, we directly use original batch buffer's
* gma address, and send original bb to hardware
- * directly
+ * directly.
*/
if (!bb->ppgtt) {
i915_gem_ww_ctx_init(&ww, false);
@@ -1774,7 +1775,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_queue_workload - Qeue a vGPU workload
+ * intel_vgpu_queue_workload - Queue a vGPU workload
* @workload: the workload to queue in
*/
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 63c751ca4119..11260392234a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -78,7 +78,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
* vGPU type name is defined as GVTg_Vx_y which contains the physical GPU
* generation type (e.g V4 as BDW server, V5 as SKL server).
*
- * Depening on the physical SKU resource, we might see vGPU types like
+ * Depending on the physical SKU resource, we might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of
* vGPU on same physical GPU depending on available resource. Each vGPU
* type will have a different number of avail_instance to indicate how
@@ -417,7 +417,7 @@ out_unlock:
* the whole vGPU to default state as when it is created. This vGPU function
* is required both for functionary and security concerns.The ultimate goal
* of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
- * assign a vGPU to a virtual machine we must isse such reset first.
+ * assign a vGPU to a virtual machine we must issue such reset first.
*
* Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
* (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
@@ -428,7 +428,7 @@ out_unlock:
*
* The parameter dev_level is to identify if we will do DMLR or GT reset.
* The parameter engine_mask is to specific the engines that need to be
- * resetted. If value ALL_ENGINES is given for engine_mask, it means
+ * reset. If value ALL_ENGINES is given for engine_mask, it means
* the caller requests a full GT reset that we will reset all virtual
* GPU engines. For FLR, engine_mask is ignored.
*/
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c2ae37d6b94d..91a7748f4492 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -1130,7 +1130,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* leave the device in D0 on those platforms and hope the BIOS will
* power down the device properly. The issue was seen on multiple old
* GENs with different BIOS vendors, so having an explicit blacklist
- * is inpractical; apply the workaround on everything pre GEN6. The
+ * is impractical; apply the workaround on everything pre GEN6. The
* platforms where the issue was seen:
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 070ab6546987..8c8d43451f35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,11 +1146,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
int ret;
/*
- * In the proccess of replacing cache_level with pat_index a tricky
+ * In the process of replacing cache_level with pat_index a tricky
* dependency is created on the definition of the enum i915_cache_level.
- * in case this enum is changed, PTE encode would be broken.
+ * In case this enum is changed, PTE encode would be broken.
* Add a WARNING here. And remove when we completely quit using this
- * enum
+ * enum.
*/
BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
I915_CACHE_LLC != 1 ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7920ad9585ae..f98e5cc14724 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -207,6 +207,7 @@ out:
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -260,7 +261,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
- intel_lpe_audio_irq_handler(dev_priv);
+ intel_lpe_audio_irq_handler(display);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -293,6 +294,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -343,7 +345,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT))
- intel_lpe_audio_irq_handler(dev_priv);
+ intel_lpe_audio_irq_handler(display);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1231,7 +1233,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
}
/**
- * intel_irq_uninstall - finilizes all irq handling
+ * intel_irq_uninstall - finalizes all irq handling
* @dev_priv: i915 device instance
*
* This stops interrupt and hotplug handling and unregisters and frees all
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 7ed6d70389af..2f88970cc0a9 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -24,7 +24,7 @@ static int i915_check_nomodeset(void)
bool use_kms = true;
/*
- * Enable KMS by default, unless explicitly overriden by
+ * Enable KMS by default, unless explicitly overridden by
* either the i915.modeset parameter or by the
* nomodeset boot option.
*/
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5384d1bb4923..bec164e884ae 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -548,7 +548,8 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
bool pollin;
u32 partial_report_size;
- /* We have to consider the (unlikely) possibility that read() errors
+ /*
+ * We have to consider the (unlikely) possibility that read() errors
* could result in an OA buffer reset which might reset the head and
* tail state.
*/
@@ -557,7 +558,8 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
hw_tail -= gtt_offset;
- /* The tail pointer increases in 64 byte increments, not in report_size
+ /*
+ * The tail pointer increases in 64 byte increments, not in report_size
* steps. Also the report size may not be a power of 2. Compute
* potentially partially landed report in the OA buffer
*/
@@ -569,8 +571,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
tail = hw_tail;
- /* Walk the stream backward until we find a report with report
- * id and timestmap not at 0. Since the circular buffer pointers
+ /*
+ * Walk the stream backward until we find a report with report
+ * id and timestamp not at 0. Since the circular buffer pointers
* progress by increments of 64 bytes and that reports can be up
* to 256 bytes long, we can't tell whether a report has fully
* landed in memory before the report id and timestamp of the
@@ -3849,7 +3852,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
}
/*
- * Asking for SSEU configuration is a priviliged operation.
+ * Asking for SSEU configuration is a privileged operation.
*/
if (props->has_sseu)
privileged_op = true;
@@ -4478,14 +4481,16 @@ static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
static u32 mask_reg_value(u32 reg, u32 val)
{
- /* HALF_SLICE_CHICKEN2 is programmed with a the
+ /*
+ * HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
- /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function
+ /*
+ * WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8e66d63d0c9f..0ec78c2b4f20 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -103,7 +103,7 @@ struct i915_pmu {
/**
* @timer_last:
*
- * Timestmap of the previous timer invocation.
+ * Timestamp of the previous timer invocation.
*/
ktime_t timer_last;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 765e6c0528fb..670cd2371f94 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -84,7 +84,7 @@
* Try to name registers according to the specs. If the register name changes in
* the specs from platform to another, stick to the original name.
*
- * Try to re-use existing register macro definitions. Only add new macros for
+ * Try to reuse existing register macro definitions. Only add new macros for
* new register offsets, or when the register contents have changed enough to
* warrant a full redefinition.
*
@@ -492,8 +492,9 @@
#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-/* Make render/texture TLB fetches lower priorty than associated data
- * fetches. This is not turned on by default
+/*
+ * Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default.
*/
#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
@@ -3197,6 +3198,10 @@
#define _TRANS_DP2_VFREQLOW_D 0x630a8
#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
/* SNB eDP training params */
/* SNB A-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
@@ -3565,6 +3570,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8f62cfa23fb7..f8c584ce3295 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -473,7 +473,7 @@ static bool __request_in_flight(const struct i915_request *signal)
* to avoid tearing.]
*
* Note that the read of *execlists->active may race with the promotion
- * of execlists->pending[] to execlists->inflight[], overwritting
+ * of execlists->pending[] to execlists->inflight[], overwriting
* the value at *execlists->active. This is fine. The promotion implies
* that we received an ACK from the HW, and so the context is not
* stuck -- if we do not see ourselves in *active, the inflight status
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 0ac55b2e4223..5f7e8138ec14 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -161,7 +161,7 @@ enum {
* parent-child relationship (parallel submission, multi-lrc) that
* hit an error while generating requests in the execbuf IOCTL.
* Indicates this request should be skipped as another request in
- * submission / relationship encoutered an error.
+ * submission / relationship encountered an error.
*/
I915_FENCE_FLAG_SKIP_PARALLEL,
@@ -187,7 +187,7 @@ enum {
* RCU lookup of it that may race against reallocation of the struct
* from the slab freelist. We intentionally do not zero the structure on
* allocation so that the lookup can use the dangling pointers (and is
- * cogniscent that those pointers may be wrong). Instead, everything that
+ * cognisant that those pointers may be wrong). Instead, everything that
* needs to be initialised must be done so explicitly.
*
* The requests are reference counted.
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 776f8cc51b2f..61b49007ecd4 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -778,8 +778,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* @flags: mask of PIN_* flags to use
*
* First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
*
* Returns:
* 0 on success, negative error code otherwise.
@@ -877,7 +877,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* objects which need to be tightly packed into the low 32bits.
*
* Note that we assume that GGTT are limited to 4GiB for the
- * forseeable future. See also i915_ggtt_offset().
+ * foreseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
@@ -1001,7 +1001,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
+ * here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index f76642886569..387b26400169 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -682,7 +682,7 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
* Have FBC ignore 3D activity since we use software
* render tracking, and otherwise a pure 3D workload
* (even if it just renders a single frame and then does
- * abosultely nothing) would not allow FBC to recompress
+ * absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
intel_uncore_write(&i915->uncore, SCPD0,
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index a5383a2bc64b..dae9dce7d1b3 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -265,7 +265,7 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
}
/**
- * intel_gvt_resume - GVT resume routine wapper
+ * intel_gvt_resume - GVT resume routine wrapper
*
* @dev_priv: drm i915 private *
*
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index ee1cd2126f97..04076316e139 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -1260,7 +1260,7 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
/**
* intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table
- * @iter: the interator
+ * @iter: the iterator
*
* This function is called for iterating the GVT MMIO table when i915 is
* taking the snapshot of the HW and GVT is building MMIO tracking table.
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a47ecfd3fd8..8d9f4c410546 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -375,7 +375,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
* leave the device suspended skipping the driver's suspend handlers
* if the device was already runtime suspended. This is needed due to
* the difference in our runtime and system suspend sequence and
- * becaue the HDA driver may require us to enable the audio power
+ * because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index e22669d61e95..7428bd8fa67f 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -31,7 +31,7 @@ struct drm_printer;
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
- * goes back to false exactly before we reenable the IRQs. We use this variable
+ * goes back to false exactly before we re-enable the IRQs. We use this variable
* to check if someone is trying to enable/disable IRQs while they're supposed
* to be disabled. This shouldn't happen and we'll print some error messages in
* case it happens.
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index eed4937c3ff3..04ef628e208b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2477,7 +2477,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore)
/*
* Sanitycheck that MMIO access to the device is working properly. If
- * the CPU is unable to communcate with a PCI device, BAR reads will
+ * the CPU is unable to communicate with a PCI device, BAR reads will
* return 0xFFFFFFFF. Let's make sure the device isn't in this state
* before we start trying to access registers.
*
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
index 329b4fcdc040..929c20e98300 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
@@ -41,7 +41,7 @@ struct pxp43_huc_auth_out {
/* PXP-Input-Packet: Init PXP session */
struct pxp43_create_arb_in {
struct pxp_cmd_header header;
- /* header.stream_id fields for vesion 4.3 of Init PXP session: */
+ /* header.stream_id fields for version 4.3 of Init PXP session: */
#define PXP43_INIT_SESSION_VALID BIT(0)
#define PXP43_INIT_SESSION_APPTYPE BIT(1)
#define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 07864b584cf4..febdbcd8d61e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -21,7 +21,7 @@ struct drm_i915_private;
*/
struct intel_pxp {
/**
- * @ctrl_gt: poiner to the tile that owns the controls for PXP subsystem assets that
+ * @ctrl_gt: pointer to the tile that owns the controls for PXP subsystem assets such as
* the VDBOX, the KCR engine (and GSC CS depending on the platform)
*/
struct intel_gt *ctrl_gt;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 0727492576be..e817d233df61 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -80,7 +80,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
- * As a final sting in the tail, invalidate stolen. Under a real S4,
+ * As a final string in the tail, invalidate stolen. Under a real S4,
* stolen is lost and needs to be refilled on resume. However, under
* CI we merely do S4-device testing (as full S4 is too unreliable
* for automated testing across a cluster), so to simulate the effect
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5c397a2df70e..5816d515203a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -287,7 +287,8 @@ static int lowlevel_hole(struct i915_address_space *vm,
GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
- /* Ignore allocation failures (i.e. don't report them as
+ /*
+ * Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
* large objects without checking that we have sufficient
* memory. We expect to hit -ENOMEM.
@@ -446,7 +447,8 @@ static int fill_hole(struct i915_address_space *vm,
list_add(&obj->st_link, &objects);
- /* Align differing sized objects against the edges, and
+ /*
+ * Align differing sized objects against the edges, and
* check we don't walk off into the void when binding
* them into the GTT.
*/
@@ -831,7 +833,8 @@ static int drunk_hole(struct i915_address_space *vm,
return -ENOMEM;
GEM_BUG_ON(!order);
- /* Ignore allocation failures (i.e. don't report them as
+ /*
+ * Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
* large objects without checking that we have sufficient
* memory. We expect to hit -ENOMEM.
@@ -964,7 +967,7 @@ static int __shrink_hole(struct i915_address_space *vm,
break;
if (igt_timeout(end_time,
- "%s timed out at ofset %llx [%llx - %llx]\n",
+ "%s timed out at offset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
err = -EINTR;
break;
@@ -1011,7 +1014,7 @@ static int shrink_boom(struct i915_address_space *vm,
/*
* Catch the case which shrink_hole seems to miss. The setup here
* requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
- * ensuring that all vma assiocated with the respective pd/pdp are
+ * ensuring that all vma associated with the respective pd/pdp are
* unpinned at the time.
*/
@@ -1537,9 +1540,10 @@ static int igt_gtt_reserve(void *arg)
u64 total;
int err = -ENODEV;
- /* i915_gem_gtt_reserve() tries to reserve the precise range
+ /*
+ * i915_gem_gtt_reserve() tries to reserve the precise range
* for the node, and evicts if it has to. So our test checks that
- * it can give us the requsted space and prevent overlaps.
+ * it can give us the requested space and prevent overlaps.
*/
/* Start by filling the GGTT */
@@ -1743,7 +1747,8 @@ static int igt_gtt_insert(void *arg)
u64 total;
int err = -ENODEV;
- /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+ /*
+ * i915_gem_gtt_insert() tries to allocate some free space in the GTT
* to the node, evicting if required.
*/
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 71b52d5efef4..7c4111e60f2e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -159,7 +159,8 @@ static int igt_vma_create(void *arg)
LIST_HEAD(objects);
int err = -ENOMEM;
- /* Exercise creating many vma amonst many objections, checking the
+ /*
+ * Exercise creating many vma amongst many objections, checking the
* vma creation and lookup routines.
*/
@@ -292,7 +293,8 @@ static int igt_vma_pin1(void *arg)
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- /* Misusing BIAS is a programming error (it is not controllable
+ /*
+ * Misusing BIAS is a programming error (it is not controllable
* from userspace) so when debugging is enabled, it explodes.
* However, the tests are still quite interesting for checking
* variable start, end and size.
@@ -312,7 +314,8 @@ static int igt_vma_pin1(void *arg)
struct i915_vma *vma;
int err = -EINVAL;
- /* Exercise all the weird and wonderful i915_vma_pin requests,
+ /*
+ * Exercise all the weird and wonderful i915_vma_pin requests,
* focusing on error handling of boundary conditions.
*/
@@ -577,7 +580,8 @@ static int igt_vma_rotate_remap(void *arg)
const unsigned int max_pages = 64;
int err = -ENOMEM;
- /* Create VMA for many different combinations of planes and check
+ /*
+ * Create VMA for many different combinations of planes and check
* that the page layout within the rotated VMA match our expectations.
*/
@@ -804,7 +808,8 @@ static int igt_vma_partial(void *arg)
struct i915_vma *vma;
int err = -ENOMEM;
- /* Create lots of different VMA for the object and check that
+ /*
+ * Create lots of different VMA for the object and check that
* we are returned the same VMA when we later request the same range.
*/
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index 842db43e46c0..9f7c9dbc178e 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -243,7 +243,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* underneath. This is a requirement from virtualization team.
*
* In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan trhough
+ * ISA bridge in the system. To work reliably, we should scan through
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index b51a2bde73e2..99219c16e8aa 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -59,6 +59,20 @@ config DRM_XE_DISPLAY
help
Disable this option only if you want to compile out display support.
+config DRM_XE_DP_TUNNEL
+ bool "Enable DP tunnel support"
+ depends on DRM_XE_DISPLAY
+ depends on USB4
+ select DRM_DISPLAY_DP_TUNNEL
+ default y
+ help
+ Choose this option to detect DP tunnels and enable the Bandwidth
+ Allocation mode for such tunnels. This allows using the maximum
+ resolution allowed by the link BW on all displays sharing the
+ link BW, for instance on a Thunderbolt link.
+
+ If in doubt say "Y".
+
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
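[Editor's note: the new DRM_XE_DP_TUNNEL option only appears when the Xe display and USB4 support it depends on are enabled. A hedged example .config fragment; symbol names come from the Kconfig entry above, the values are only an illustration.]

	CONFIG_USB4=y
	CONFIG_DRM_XE=m
	CONFIG_DRM_XE_DISPLAY=y
	CONFIG_DRM_XE_DP_TUNNEL=y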
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5c97ad6ed738..3a243c4ea79b 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -199,6 +199,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
i915-display/intel_cdclk.o \
+ i915-display/intel_cmtg.o \
i915-display/intel_color.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
@@ -262,6 +263,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
i915-display/intel_quirks.o \
+ i915-display/intel_snps_hdmi_pll.o \
i915-display/intel_snps_phy.o \
i915-display/intel_tc.o \
i915-display/intel_vblank.o \
@@ -301,6 +303,9 @@ ifeq ($(CONFIG_DEBUG_FS),y)
i915-display/intel_pipe_crc.o
endif
+xe-$(CONFIG_DRM_XE_DP_TUNNEL) += \
+ i915-display/intel_dp_tunnel.o
+
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index bdae8392e125..4465c40f8134 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -10,6 +10,8 @@
#include "xe_ggtt_types.h"
+#include <linux/refcount.h>
+
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
#undef I915_TILING_Y
@@ -19,6 +21,7 @@
struct xe_bo;
struct i915_vma {
+ refcount_t ref;
struct xe_bo *bo, *dpt;
struct xe_ggtt_node *node;
};
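[Editor's note: the new ref field makes the compat i915_vma a refcounted object, so a pinned framebuffer mapping can be shared instead of re-pinned. A minimal sketch of the standard refcount_t lifecycle this enables; the helper names are illustrative, not the driver's.]

	#include <linux/refcount.h>

	static void vma_get_example(struct i915_vma *vma)
	{
		refcount_inc(&vma->ref);	/* take an extra reference on reuse */
	}

	static void vma_put_example(struct i915_vma *vma)
	{
		/* only the last put actually tears the mapping down */
		if (!refcount_dec_and_test(&vma->ref))
			return;
		/* ... unpin/free the underlying GGTT node and BOs here ... */
	}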
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index b3921dbc52ff..96ba9595bf2a 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -216,7 +216,7 @@ void xe_display_fini(struct xe_device *xe)
intel_hpd_poll_fini(xe);
intel_hdcp_component_fini(display);
- intel_audio_deinit(xe);
+ intel_audio_deinit(display);
}
void xe_display_register(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 9fa51b84737c..25ce032bb293 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -9,6 +9,7 @@
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
@@ -287,6 +288,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (!vma)
return ERR_PTR(-ENODEV);
+ refcount_set(&vma->ref, 1);
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
@@ -347,6 +349,9 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
u8 tile_id = vma->node->ggtt->tile->id;
+ if (!refcount_dec_and_test(&vma->ref))
+ return;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
@@ -377,25 +382,58 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
__xe_unpin_fb_vma(vma);
}
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static bool reuse_vma(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
{
- struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct i915_vma *vma;
+
+ if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
+ !memcmp(&old_plane_state->view.gtt,
+ &new_plane_state->view.gtt,
+ sizeof(new_plane_state->view.gtt))) {
+ vma = old_plane_state->ggtt_vma;
+ goto found;
+ }
+
+ if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (vma)
+ goto found;
+ }
+
+ return false;
+
+found:
+ refcount_inc(&vma->ref);
+ new_plane_state->ggtt_vma = vma;
+ return true;
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_gem_object *obj = intel_fb_bo(fb);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct i915_vma *vma;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
u64 phys_alignment = plane->min_alignment(plane, fb, 0);
+ if (reuse_vma(new_plane_state, old_plane_state))
+ return 0;
+
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
- vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment);
+ vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, phys_alignment);
if (IS_ERR(vma))
return PTR_ERR(vma);
- plane_state->ggtt_vma = vma;
+ new_plane_state->ggtt_vma = vma;
return 0;
}
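[Editor's note: with reuse_vma() in place, intel_plane_pin_fb() compares the new plane state against the old one (same fb and identical GGTT view, or the already-pinned fbdev framebuffer) and inherits the existing vma with an extra reference instead of pinning again. A hedged sketch of the caller-side contract; the calling function shown here is illustrative, not the actual i915/xe call site.]

	/* During atomic prepare: try to inherit the old mapping, else pin a fresh one. */
	static int example_prepare_plane(struct intel_plane_state *new_state,
					 const struct intel_plane_state *old_state)
	{
		int ret;

		ret = intel_plane_pin_fb(new_state, old_state);
		if (ret)
			return ret;

		/*
		 * new_state->ggtt_vma now holds either the reused or the
		 * newly pinned vma; its refcount accounts for this state.
		 */
		return 0;
	}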
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 2eb9633f163a..2a2f250fa495 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
-
- /*
- * Flip to the newly created mapping ASAP, so we can re-use the
- * first part of GGTT for WOPCM, prevent flickering, and prevent
- * the lookup of sysmem scratch pages.
- */
- plane->check_plane(crtc_state, plane_state);
- plane->async_flip(NULL, plane, crtc_state, plane_state, true);
return;
nofb: