From 6cdcb5e73fba29bf115f2677c59de63f95039e2b Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 12 Jun 2013 17:27:29 -0300 Subject: drm/i915: invert the verbosity of intel_enable_fbc We currently print a DRM_DEBUG_KMS message on the happy path and don't print anything on the "failed to allocate" path. On some desktop environments (e.g., Unity) I see the "scheduling delayed FBC enable" thousands and thousands of times on my dmesg. So kill the useless message for the happy case, saving a lot of dmesg space, and properly signal the "kzalloc fail" case. Signed-off-by: Paulo Zanoni Reviewed-by: Zoltan Nyul Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ccbdd83f5220..346bde1edc0e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -381,6 +381,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) { + DRM_ERROR("Failed to allocate FBC work structure\n"); dev_priv->display.enable_fbc(crtc, interval); return; } @@ -392,8 +393,6 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dev_priv->fbc_work = work; - DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); - /* Delay the actual enabling to let pageflipping cease and the * display to settle before starting the compression. Note that * this delay also serves a second purpose: it allows for a -- cgit v1.2.3 From b63fb44c65ac37ceac8acd258939fcdc9f223c42 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 16:22:01 +0100 Subject: drm/i915: Make intel_enable_fbc() static This function has no user outside of intel_pm.c. Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_pm.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c8c9b6f48230..8eeee74f3d91 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -780,7 +780,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, extern void intel_init_pm(struct drm_device *dev); /* FBC */ extern bool intel_fbc_enabled(struct drm_device *dev); -extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern void intel_update_fbc(struct drm_device *dev); /* IPS */ extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 346bde1edc0e..e0de8b3337d8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -368,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) dev_priv->fbc_work = NULL; } -void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct intel_fbc_work *work; struct drm_device *dev = crtc->dev; -- cgit v1.2.3 From 8a5729a37375c20a196e14ce49b4390d42bdb87b Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 16:22:02 +0100 Subject: drm/i915: Fix reason for per-chip disabling of FBC When running on my snb machine, recent kernels display successively: [drm:intel_update_fbc], fbc set to per-chip default [drm:intel_update_fbc], fbc disabled per module param But no module param is set. 
This happens because the check for the module parameter uses a variable that has been overridden inside the "per-chip default" code. Fix up the logic and add another reason for the FBC to be disabled. Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 14 ++++++-------- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 47d6c748057e..dca49828e3fc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1518,6 +1518,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) case FBC_MODULE_PARAM: seq_printf(m, "disabled per module param (default off)"); break; + case FBC_CHIP_DEFAULT: + seq_printf(m, "disabled per chip default"); + break; default: seq_printf(m, "unknown reason"); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cc1d6056ab70..59c45f293ec9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -537,6 +537,7 @@ enum no_fbc_reason { FBC_NOT_TILED, /* buffer not tiled */ FBC_MULTIPLE_PIPES, /* more than one pipe active */ FBC_MODULE_PARAM, + FBC_CHIP_DEFAULT, /* disabled by default on this chip */ }; enum intel_pch { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e0de8b3337d8..adc7f3eb4ef3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -447,7 +447,6 @@ void intel_update_fbc(struct drm_device *dev) struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; - int enable_fbc; unsigned int max_hdisplay, max_vdisplay; if (!i915_powersave) @@ -488,14 +487,13 @@ void intel_update_fbc(struct drm_device *dev) intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - enable_fbc = i915_enable_fbc; - if (enable_fbc < 0) { - DRM_DEBUG_KMS("fbc set to per-chip default\n"); - enable_fbc = 1; - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) - enable_fbc = 0; + if (i915_enable_fbc < 0 && + INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { + DRM_DEBUG_KMS("disabled per chip default\n"); + dev_priv->no_fbc_reason = FBC_CHIP_DEFAULT; + goto out_disable; } - if (!enable_fbc) { + if (!i915_enable_fbc) { DRM_DEBUG_KMS("fbc disabled per module param\n"); dev_priv->no_fbc_reason = FBC_MODULE_PARAM; goto out_disable; -- cgit v1.2.3 From 267f0c90ac6728f70fade74ab89932a00e5e5a7e Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 22:59:48 +0100 Subject: drm/i915: Use seq_puts/seq_putc when possible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Caught with checkpatch.pl.
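As background for the mechanical conversion that follows, here is a minimal, hypothetical seq_file show callback (function name and values are made up, not taken from this series) contrasting the three helpers checkpatch steers towards: seq_printf() only where a format specifier is actually needed, seq_puts() for fixed strings, and seq_putc() for a single character.

#include <linux/seq_file.h>

/* Illustrative sketch only -- not part of the series. */
static int example_status_show(struct seq_file *m, void *unused)
{
	int count = 42;				/* hypothetical value */

	seq_puts(m, "Status:\n");		/* fixed string: seq_puts() */
	seq_printf(m, "count = %d", count);	/* needs formatting: seq_printf() */
	seq_putc(m, '\n');			/* single character: seq_putc() */

	return 0;
}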
Suggested-by: Daniel Vetter Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 110 ++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index dca49828e3fc..b1e91f33e377 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -157,11 +157,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) switch (list) { case ACTIVE_LIST: - seq_printf(m, "Active:\n"); + seq_puts(m, "Active:\n"); head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: - seq_printf(m, "Inactive:\n"); + seq_puts(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; default: @@ -171,9 +171,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, head, mm_list) { - seq_printf(m, " "); + seq_puts(m, " "); describe_obj(m, obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); total_obj_size += obj->base.size; total_gtt_size += obj->gtt_space->size; count++; @@ -290,7 +290,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->gtt.total, dev_priv->gtt.mappable_end - dev_priv->gtt.start); - seq_printf(m, "\n"); + seq_putc(m, '\n'); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; @@ -329,9 +329,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) if (list == PINNED_LIST && obj->pin_count == 0) continue; - seq_printf(m, " "); + seq_puts(m, " "); describe_obj(m, obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); total_obj_size += obj->base.size; total_gtt_size += obj->gtt_space->size; count++; @@ -371,9 +371,9 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) pipe, plane); } if (work->enable_stall_check) - seq_printf(m, "Stall check enabled, "); + seq_puts(m, "Stall check enabled, "); else - seq_printf(m, "Stall check waiting for page flip ioctl, "); + seq_puts(m, "Stall check waiting for page flip ioctl, "); seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); if (work->old_fb_obj) { @@ -424,7 +424,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) mutex_unlock(&dev->struct_mutex); if (count == 0) - seq_printf(m, "No requests\n"); + seq_puts(m, "No requests\n"); return 0; } @@ -574,10 +574,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Fence %d, pin count = %d, object = ", i, dev_priv->fence_regs[i].pin_count); if (obj == NULL) - seq_printf(m, "unused"); + seq_puts(m, "unused"); else describe_obj(m, obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } mutex_unlock(&dev->struct_mutex); @@ -1246,7 +1246,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) (freq_sts >> 8) & 0xff)); mutex_unlock(&dev_priv->rps.hw_lock); } else { - seq_printf(m, "no P-state info available\n"); + seq_puts(m, "no P-state info available\n"); } return 0; @@ -1341,28 +1341,28 @@ static int ironlake_drpc_info(struct seq_file *m) seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); seq_printf(m, "Render standby enabled: %s\n", (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); - seq_printf(m, "Current RS state: "); + seq_puts(m, "Current RS state: "); switch (rstdbyctl & RSX_STATUS_MASK) { case RSX_STATUS_ON: - seq_printf(m, "on\n"); + seq_puts(m, "on\n"); break; case RSX_STATUS_RC1: - seq_printf(m, "RC1\n"); + seq_puts(m, "RC1\n"); break; case RSX_STATUS_RC1E: - seq_printf(m, "RC1E\n"); + seq_puts(m, "RC1E\n"); break; case RSX_STATUS_RS1: - seq_printf(m, "RS1\n"); + seq_puts(m, "RS1\n"); break; case RSX_STATUS_RS2: - seq_printf(m, "RS2 (RC6)\n"); + seq_puts(m, "RS2 (RC6)\n"); break; case RSX_STATUS_RS3: - seq_printf(m, "RC3 (RC6+)\n"); + seq_puts(m, "RC3 (RC6+)\n"); break; default: - seq_printf(m, "unknown\n"); + seq_puts(m, "unknown\n"); break; } @@ -1389,8 +1389,8 @@ static int gen6_drpc_info(struct seq_file *m) spin_unlock_irq(&dev_priv->gt_lock); if (forcewake_count) { - seq_printf(m, "RC information inaccurate because somebody " - "holds a forcewake reference \n"); + seq_puts(m, "RC information inaccurate because somebody " + "holds a forcewake reference \n"); } else { /* NB: we cannot use forcewake, else we read the wrong values */ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) @@ -1423,25 +1423,25 @@ static int gen6_drpc_info(struct seq_file *m) yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); seq_printf(m, "Deepest RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); - seq_printf(m, "Current RC state: "); + seq_puts(m, "Current RC state: "); switch (gt_core_status & GEN6_RCn_MASK) { case GEN6_RC0: if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) - seq_printf(m, "Core Power Down\n"); + seq_puts(m, "Core Power Down\n"); else - seq_printf(m, "on\n"); + seq_puts(m, "on\n"); break; case GEN6_RC3: - seq_printf(m, "RC3\n"); + seq_puts(m, "RC3\n"); break; case GEN6_RC6: - seq_printf(m, "RC6\n"); + seq_puts(m, "RC6\n"); break; case GEN6_RC7: - seq_printf(m, "RC7\n"); + seq_puts(m, "RC7\n"); break; default: - seq_printf(m, "Unknown\n"); + seq_puts(m, "Unknown\n"); break; } @@ -1485,46 +1485,46 @@ static int i915_fbc_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; if (!I915_HAS_FBC(dev)) { - seq_printf(m, "FBC unsupported on this chipset\n"); + seq_puts(m, "FBC unsupported on this chipset\n"); return 0; } if (intel_fbc_enabled(dev)) { - seq_printf(m, "FBC enabled\n"); + seq_puts(m, "FBC enabled\n"); } else { - seq_printf(m, "FBC disabled: "); + seq_puts(m, "FBC disabled: "); switch (dev_priv->no_fbc_reason) { case FBC_NO_OUTPUT: - seq_printf(m, "no outputs"); + seq_puts(m, "no outputs"); break; case FBC_STOLEN_TOO_SMALL: - seq_printf(m, "not enough stolen memory"); + seq_puts(m, "not enough stolen memory"); break; case FBC_UNSUPPORTED_MODE: - seq_printf(m, "mode not supported"); + seq_puts(m, "mode not supported"); break; case FBC_MODE_TOO_LARGE: - seq_printf(m, "mode too large"); + seq_puts(m, "mode too large"); break; case FBC_BAD_PLANE: - seq_printf(m, "FBC unsupported on plane"); + seq_puts(m, "FBC unsupported on plane"); break; case FBC_NOT_TILED: - seq_printf(m, "scanout buffer not tiled"); + seq_puts(m, "scanout buffer not tiled"); break; case FBC_MULTIPLE_PIPES: - seq_printf(m, "multiple pipes are enabled"); + seq_puts(m, "multiple pipes are enabled"); break; case FBC_MODULE_PARAM: - seq_printf(m, "disabled per module param (default off)"); + seq_puts(m, "disabled per module param (default off)"); break; case FBC_CHIP_DEFAULT: - seq_printf(m, "disabled per chip default"); + seq_puts(m, "disabled per chip default"); break; default: - seq_printf(m, "unknown reason"); + seq_puts(m, 
"unknown reason"); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } return 0; } @@ -1607,7 +1607,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) int gpu_freq, ia_freq; if (!(IS_GEN6(dev) || IS_GEN7(dev))) { - seq_printf(m, "unsupported on this chipset\n"); + seq_puts(m, "unsupported on this chipset\n"); return 0; } @@ -1615,7 +1615,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) if (ret) return ret; - seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); for (gpu_freq = dev_priv->rps.min_delay; gpu_freq <= dev_priv->rps.max_delay; @@ -1704,7 +1704,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.bits_per_pixel, atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); mutex_unlock(&dev->mode_config.mutex); mutex_lock(&dev->mode_config.fb_lock); @@ -1719,7 +1719,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.bits_per_pixel, atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.fb_lock); @@ -1739,22 +1739,22 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; if (dev_priv->ips.pwrctx) { - seq_printf(m, "power context "); + seq_puts(m, "power context "); describe_obj(m, dev_priv->ips.pwrctx); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } if (dev_priv->ips.renderctx) { - seq_printf(m, "render context "); + seq_puts(m, "render context "); describe_obj(m, dev_priv->ips.renderctx); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } for_each_ring(ring, dev_priv, i) { if (ring->default_context) { seq_printf(m, "HW default context %s ring ", ring->name); describe_obj(m, ring->default_context->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } } @@ -1871,7 +1871,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - seq_printf(m, "aliasing PPGTT:\n"); + seq_puts(m, "aliasing PPGTT:\n"); seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); } seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); @@ -1889,7 +1889,7 @@ static int i915_dpio_info(struct seq_file *m, void *data) if (!IS_VALLEYVIEW(dev)) { - seq_printf(m, "unsupported\n"); + seq_puts(m, "unsupported\n"); return 0; } -- cgit v1.2.3 From aee56cff333d15e14c5bb2ff3b1e5c7cd15c3805 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 22:59:49 +0100 Subject: drm/i915: Fix a few style issues found by checkpatch.pl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Missing spaces and misplaced '*'. 
Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b1e91f33e377..c16926ca15be 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -222,7 +222,7 @@ static int per_file_stats(int id, void *ptr, void *data) return 0; } -static int i915_gem_object_info(struct seq_file *m, void* data) +static int i915_gem_object_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; @@ -310,7 +310,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) return 0; } -static int i915_gem_gtt_info(struct seq_file *m, void* data) +static int i915_gem_gtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; @@ -1377,8 +1377,7 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_i915_private *dev_priv = dev->dev_private; u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; unsigned forcewake_count; - int count=0, ret; - + int count = 0, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -1781,7 +1780,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) static const char *swizzle_string(unsigned swizzle) { - switch(swizzle) { + switch (swizzle) { case I915_BIT_6_SWIZZLE_NONE: return "none"; case I915_BIT_6_SWIZZLE_9: -- cgit v1.2.3 From f4db9321a77258587d70cccdd4ff556df48eba2e Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 22:59:50 +0100 Subject: drm/i915: Fix a couple of "should it be static?" sparse warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A genuine 'static' omission and 2 other warnings triggered by not including the header where those functions where defined. Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index adc7f3eb4ef3..aa48fc6ba4db 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -30,6 +30,7 @@ #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" #include +#include #define FORCEWAKE_ACK_TIMEOUT_MS 2 @@ -2465,8 +2466,8 @@ static void hsw_compute_wm_results(struct drm_device *dev, /* Find the result with the highest level enabled. Check for enable_fbc_wm in * case both are at the same level. Prefer r1 in case they're the same. */ -struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, - struct hsw_wm_values *r2) +static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, + struct hsw_wm_values *r2) { int i, val_r1 = 0, val_r2 = 0; -- cgit v1.2.3 From 3ef8fb5ae296c3b626b87ec1422aeb66dd338ee8 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 24 Jun 2013 14:54:50 +0100 Subject: drm/i915: Bail out once we've found the context object Once we've found the the context object programmed in CCID, there's no need to look the other objects in the list. 
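The one-line fix below simply stops the walk at the first match. As a generic illustration of the pattern (hypothetical structure and helper, not the driver code):

#include <linux/list.h>

struct example_obj {
	struct list_head link;
	unsigned long offset;
};

/* Hypothetical helper: return the first matching entry and stop walking. */
static struct example_obj *example_find(struct list_head *head,
					unsigned long offset)
{
	struct example_obj *obj;

	list_for_each_entry(obj, head, link) {
		if (obj->offset == offset)
			return obj;	/* same effect as the added break */
	}

	return NULL;
}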
Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3d92a7cef154..92e78167ca84 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1805,6 +1805,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { ering->ctx = i915_error_object_create_sized(dev_priv, obj, 1); + break; } } } -- cgit v1.2.3 From 15bdd4cff43104cc0692f8694019c043cf19d102 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:23 +0200 Subject: drm/i915: consolidate pch pll enable sequence It's been splattered over 3 different places all doing random things. Now we have (mostly) the same sequence as i8xx/i9xx, but all called from the crtc_enable hook (through the pll->enable function): - write new dividers - enable vco and wait for stable clocks - write again for the pixel multiplier I've left the seemingly random 200 usec delay in there, just in case. Also move the encoder->pre_pll_enable hook into the crtc_enable function, at the same spot we currently have a hack to enable the lvds port. Since that hack is now redundant, kill it. While doing this patch I've learned the hard way that we can only fire up the LVDS port if both the pch dpll _and_ the fdi rx pll are not yet enabled. Otherwise things go haywire, at least on cpt. v2: It is paramount to write the FPx divisors before we enable the vco by writing to the DPLL registers, for otherwise the divisors won't get updated. This is in line with the i8xx/i9xx dpll. v3: To keep the nice abstraction add a ->mode_set callback to set the divisors. Also streamline the enabling/disabling code a bit by removing some cargo-cult duplication and clearing registers where possible in the ->disable hook. v4: Remove now unused local variable.
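For orientation, a condensed sketch of where the sequence ends up (taken from the diff that follows; the asserts, the disable path and error handling are omitted here):

/* ->mode_set: dividers must be written while the VCO is still off. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}

/* ->enable: turn on the VCO, wait for stable clocks, then write the
 * register again so the pixel multiplier latches. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}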
Acked-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_display.c | 75 +++++++++++++----------------------- 2 files changed, 29 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 59c45f293ec9..dfb10fee83b6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -156,6 +156,8 @@ struct intel_shared_dpll { /* should match the index in the dev_priv->shared_dplls array */ enum intel_dpll_id id; struct intel_dpll_hw_state hw_state; + void (*mode_set)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); void (*enable)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll); void (*disable)(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 85f3eb74d2b7..644be8aed396 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3096,13 +3096,7 @@ found: WARN_ON(pll->on); assert_shared_dpll_disabled(dev_priv, pll); - /* Wait for the clocks to stabilize before rewriting the regs */ - I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(PCH_DPLL(pll->id)); - udelay(150); - - I915_WRITE(PCH_FP0(pll->id), fp); - I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE); + pll->mode_set(dev_priv, pll); } pll->refcount++; @@ -3174,7 +3168,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - u32 temp; WARN_ON(!crtc->enabled); @@ -3188,12 +3181,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(dev); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - if ((temp & LVDS_PORT_EN) == 0) - I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); - } - + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); if (intel_crtc->config.has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -5723,10 +5713,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - if (is_lvds && has_reduced_clock && i915_powersave) intel_crtc->lowfreq_avail = true; else @@ -5735,23 +5721,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (intel_crtc->config.has_pch_encoder) { pll = intel_crtc_to_shared_dpll(intel_crtc); - I915_WRITE(PCH_DPLL(pll->id), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(pll->id)); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. 
- */ - I915_WRITE(PCH_DPLL(pll->id), dpll); - - if (has_reduced_clock) - I915_WRITE(PCH_FP1(pll->id), fp2); - else - I915_WRITE(PCH_FP1(pll->id), fp); } intel_set_pipe_timings(intel_crtc); @@ -8800,19 +8769,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, return val & DPLL_VCO_ENABLE; } +static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0); + I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1); +} + static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t reg, val; - /* PCH refclock must be enabled first */ assert_pch_refclk_enabled(dev_priv); - reg = PCH_DPLL(pll->id); - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(PCH_DPLL(pll->id)); + udelay(150); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); + POSTING_READ(PCH_DPLL(pll->id)); udelay(200); } @@ -8821,7 +8803,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, { struct drm_device *dev = dev_priv->dev; struct intel_crtc *crtc; - uint32_t reg, val; /* Make sure no transcoder isn't still depending on us. */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { @@ -8829,11 +8810,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, assert_pch_transcoder_disabled(dev_priv, crtc->pipe); } - reg = PCH_DPLL(pll->id); - val = I915_READ(reg); - val &= ~DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(PCH_DPLL(pll->id), 0); + POSTING_READ(PCH_DPLL(pll->id)); udelay(200); } @@ -8852,6 +8830,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev) for (i = 0; i < dev_priv->num_shared_dpll; i++) { dev_priv->shared_dplls[i].id = i; dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; + dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; dev_priv->shared_dplls[i].get_hw_state = -- cgit v1.2.3 From b89a1d395bf8bd209f1e14265c5b1d34c4a98d57 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:24 +0200 Subject: drm/i915: use sw tracked state to select shared dplls Just yet another prep step to be able to do all this up-front, before we've set up any of the shared dplls in the new state. This will eventually be useful for atomic modesetting. 
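Condensed, the new sharing rule in intel_get_shared_dpll() boils down to comparing software-tracked state instead of reading registers back. An illustrative wrapper (the patch open-codes the memcmp, this helper is hypothetical):

#include <linux/string.h>

/* Hypothetical wrapper around the check the patch introduces. */
static bool dpll_hw_state_matches(const struct intel_dpll_hw_state *want,
				  const struct intel_dpll_hw_state *have)
{
	return memcmp(want, have, sizeof(*want)) == 0;
}

A CRTC may reuse an already-referenced shared DPLL only when its computed crtc->config.dpll_hw_state is byte-for-byte identical to the pll->hw_state that PLL was set up with.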
Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 644be8aed396..6b379bcb031f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3031,7 +3031,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc) crtc->config.shared_dpll = DPLL_ID_PRIVATE; } -static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) +static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3061,8 +3061,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, if (pll->refcount == 0) continue; - if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) && - fp == I915_READ(PCH_FP0(pll->id))) { + if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, + sizeof(pll->hw_state)) == 0) { DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", crtc->base.base.id, pll->name, pll->refcount, pll->active); @@ -5701,7 +5701,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, else intel_crtc->config.dpll_hw_state.fp1 = fp; - pll = intel_get_shared_dpll(intel_crtc, dpll, fp); + pll = intel_get_shared_dpll(intel_crtc); if (pll == NULL) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", pipe_name(pipe)); -- cgit v1.2.3 From 87442f732bfad16a8b65fb5d86f69bc0417dc9db Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Jun 2013 00:52:17 +0200 Subject: drm/i915: duplicate intel_enable_pll into i9xx and vlv versions Mostly since I _really_ don't want to touch the vlv hell. No code change, just duplication. Also kill a now seriously outdated code comment - the remark about the dvo encoder is now handled with the pipe A quirk. v2: Update the BUG_ONs as suggested by Jani (both in vlv_ and i9xx_ functions, since the split happens here). Cc: Jani Nikula Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 51 ++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6b379bcb031f..e0efe4b015fb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1301,20 +1301,37 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); } -/** - * intel_enable_pll - enable a PLL - * @dev_priv: i915 private structure - * @pipe: pipe PLL to enable - * - * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to - * make sure the PLL reg is writable first though, since the panel write - * protect mechanism may be enabled. - * - * Note! This is for pre-ILK only. - * - * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. 
- */ -static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void vlv_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + int reg; + u32 val; + + assert_pipe_disabled(dev_priv, pipe); + + /* No really, not for ILK+ */ + BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); + + /* PLL is protected by panel, make sure we can write it */ + if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) + assert_panel_unlocked(dev_priv, pipe); + + reg = DPLL(pipe); + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + + /* We do this three times for luck */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ +} + +static void i9xx_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; @@ -1322,7 +1339,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) assert_pipe_disabled(dev_priv, pipe); /* No really, not for ILK+ */ - BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); + BUG_ON(dev_priv->info->gen >= 5); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) @@ -3589,7 +3606,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); - intel_enable_pll(dev_priv, pipe); + vlv_enable_pll(dev_priv, pipe); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -3630,7 +3647,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - intel_enable_pll(dev_priv, pipe); + i9xx_enable_pll(dev_priv, pipe); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) -- cgit v1.2.3 From 55607e8aaa86e68ed4f37d072ee9af404cc8a830 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 16 Jun 2013 21:42:39 +0200 Subject: drm/i915: asserts for lvds pre_enable Lots of bangin my head against the wall^UExperiments have shown that we really need to enable the lvds port before we enable plls. Strangely that seems to include the fdi rx pll on the pch. Note that the pch pll assert can fire since the lvds port has it's own special clock source settings in the DPLL register, which means it will never have a shared dpll (since there's only one LVDS port). Anyway, encode this new evidence with a few nice WARNs. v2: Incorporate review comments from Imre. - Explain why lvds can't have a shared dpll. - Update the WARN output. 
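Condensed from the diff that follows, the new checks at the top of the LVDS pre-enable hook encode exactly that ordering requirement: on PCH platforms both the FDI RX PLL and the shared DPLL must still be off, on gmch platforms the pipe PLL must be off.

	if (HAS_PCH_SPLIT(dev)) {
		assert_fdi_rx_pll_disabled(dev_priv, pipe);
		assert_shared_dpll_disabled(dev_priv,
					    intel_crtc_to_shared_dpll(crtc));
	} else {
		assert_pll_disabled(dev_priv, pipe);
	}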
Cc: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 26 +++++++++++++------------- drivers/gpu/drm/i915/intel_drv.h | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_lvds.c | 17 ++++++++++++----- 3 files changed, 41 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e0efe4b015fb..66c6e383c7f7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -892,8 +892,8 @@ static const char *state_string(bool enabled) } /* Only for pre-ILK configs */ -static void assert_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) +void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) { int reg; u32 val; @@ -906,10 +906,8 @@ static void assert_pll(struct drm_i915_private *dev_priv, "PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } -#define assert_pll_enabled(d, p) assert_pll(d, p, true) -#define assert_pll_disabled(d, p) assert_pll(d, p, false) -static struct intel_shared_dpll * +struct intel_shared_dpll * intel_crtc_to_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; @@ -921,9 +919,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc) } /* For ILK+ */ -static void assert_shared_dpll(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - bool state) +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state) { bool cur_state; struct intel_dpll_hw_state hw_state; @@ -942,8 +940,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv, "%s assertion failure (expected %s, current %s)\n", pll->name, state_string(state), state_string(cur_state)); } -#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) -#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1007,15 +1003,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); } -static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe) +void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) { int reg; u32 val; + bool cur_state; reg = FDI_RX_CTL(pipe); val = I915_READ(reg); - WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); + cur_state = !!(val & FDI_RX_PLL_ENABLE); + WARN(cur_state != state, + "FDI RX PLL assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); } static void assert_panel_unlocked(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8eeee74f3d91..6f7f33e70e91 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -747,6 +747,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data, extern void intel_fb_output_poll_changed(struct drm_device *dev); extern void intel_fb_restore_mode(struct drm_device *dev); +struct intel_shared_dpll * +intel_crtc_to_shared_dpll(struct intel_crtc *crtc); + +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state); +#define assert_shared_dpll_enabled(d, 
p) assert_shared_dpll(d, p, true) +#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) +void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_pll_enabled(d, p) assert_pll(d, p, true) +#define assert_pll_disabled(d, p) assert_pll(d, p, false) +void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) +#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2abb2d3c727b..a510fa824027 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -120,12 +120,20 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_display_mode *fixed_mode = lvds_encoder->attached_connector->base.panel.fixed_mode; - int pipe = intel_crtc->pipe; + int pipe = crtc->pipe; u32 temp; + if (HAS_PCH_SPLIT(dev)) { + assert_fdi_rx_pll_disabled(dev_priv, pipe); + assert_shared_dpll_disabled(dev_priv, + intel_crtc_to_shared_dpll(crtc)); + } else { + assert_pll_disabled(dev_priv, pipe); + } + temp = I915_READ(lvds_encoder->reg); temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; @@ -142,7 +150,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) /* set the corresponsding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; - temp |= intel_crtc->config.gmch_pfit.lvds_border_bits; + temp |= crtc->config.gmch_pfit.lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ @@ -162,8 +170,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) if (INTEL_INFO(dev)->gen == 4) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. */ - if (intel_crtc->config.dither && - intel_crtc->config.pipe_bpp == 18) + if (crtc->config.dither && crtc->config.pipe_bpp == 18) temp |= LVDS_ENABLE_DITHER; else temp &= ~LVDS_ENABLE_DITHER; -- cgit v1.2.3 From 952735ee416f686fac55957b221461dfbd80ce1c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:27 +0200 Subject: drm/i915: move encoder pre enable hooks together on ilk+ The ->pre_enable hook is only used for the cpu edp port on ilk-ivb, so we can safely move it up across the fdi pll enabling. Unfortunately we can't (yet) merge in the pre_pll enable hook despite that only lvds uses it on ilk-ivb: Since the same lvds hook is also needed on i9xx platforms we need to fix up the pll enabling sequence there, too.
Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 66c6e383c7f7..f7d4c5eba6ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3198,9 +3198,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(dev); - for_each_encoder_on_crtc(dev, crtc, encoder) + for_each_encoder_on_crtc(dev, crtc, encoder) { if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); + if (encoder->pre_enable) + encoder->pre_enable(encoder); + } if (intel_crtc->config.has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -3212,10 +3215,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) assert_fdi_rx_disabled(dev_priv, pipe); } - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); - ironlake_pfit_enable(intel_crtc); /* -- cgit v1.2.3 From 8bcc2795a68ad9c2010fd5a2548432fad930fcc1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:28 +0200 Subject: drm/i915: hw state readout for i9xx dplls In addition to existing stuff we also need to track DPLL_MD on gen4 and vlv. This is prep work so that we can move the dpll enable sequence out from the ->mode_set callback into the crtc enabling functions. Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dfb10fee83b6..e9c8b588e14d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -144,6 +144,7 @@ enum intel_dpll_id { struct intel_dpll_hw_state { uint32_t dpll; + uint32_t dpll_md; uint32_t fp0; uint32_t fp1; }; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f7d4c5eba6ba..4e451faf96c6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4272,14 +4272,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, } I915_WRITE(FP0(pipe), fp); + crtc->config.dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && reduced_clock && i915_powersave) { I915_WRITE(FP1(pipe), fp2); + crtc->config.dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; } else { I915_WRITE(FP1(pipe), fp); + crtc->config.dpll_hw_state.fp1 = fp; } } @@ -4457,6 +4460,8 @@ static void vlv_update_pll(struct intel_crtc *crtc) dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; dpll |= DPLL_VCO_ENABLE; + crtc->config.dpll_hw_state.dpll = dpll; + I915_WRITE(DPLL(pipe), dpll); POSTING_READ(DPLL(pipe)); udelay(150); @@ -4466,6 +4471,8 @@ static void vlv_update_pll(struct intel_crtc *crtc) dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + crtc->config.dpll_hw_state.dpll_md = dpll_md; + I915_WRITE(DPLL_MD(pipe), dpll_md); POSTING_READ(DPLL_MD(pipe)); @@ -4544,6 +4551,8 @@ static void i9xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; + crtc->config.dpll_hw_state.dpll = dpll; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); POSTING_READ(DPLL(pipe)); udelay(150); @@ -4564,6 +4573,8 @@ static void i9xx_update_pll(struct intel_crtc *crtc, if (INTEL_INFO(dev)->gen >= 4) { u32 
dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + crtc->config.dpll_hw_state.dpll_md = dpll_md; + I915_WRITE(DPLL_MD(pipe), dpll_md); } else { /* The pixel multiplier can only be updated once the @@ -4608,6 +4619,8 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; + crtc->config.dpll_hw_state.dpll = dpll; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); POSTING_READ(DPLL(pipe)); udelay(150); @@ -4964,6 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; + pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { tmp = I915_READ(DPLL(crtc->pipe)); pipe_config->pixel_multiplier = @@ -4975,6 +4989,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, * function. */ pipe_config->pixel_multiplier = 1; } + pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); + if (!IS_VALLEYVIEW(dev)) { + pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); + pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); + } return true; } @@ -8126,6 +8145,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); + PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); -- cgit v1.2.3 From 66e3d5c09940d08d94b03e65b420fadaa7484318 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 16 Jun 2013 21:24:16 +0200 Subject: drm/i915: move i9xx dpll enabling into crtc enable function Now that we have the proper pipe config to track this, we don't need to write any registers any more. Note that for platforms without DPLL_MD (pre-gen4), which store the pixel multiplier in the DPLL register, I've decided to keep the seemingly "redundant" write: The comment right below saying "do this thrice for luck" doesn't instill confidence ... v2: Drop a few now unnecessary local variables and switch the enable function to take a struct intel_crtc * to simplify arguments. v3: Rebase on top of the newly-colored BUG_ON. v4: Amend commit message to alleviate Imre's comment about the redundant DPLL write for the pixel multiplier.
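Condensed from the diff that follows, the relevant part of the new i9xx_enable_pll(): gen4+ sets the multiplier through the separate DPLL_MD register, while older parts can only latch it by rewriting DPLL once the clocks are stable.

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);		/* wait for the clocks to stabilize */

	if (INTEL_INFO(dev)->gen >= 4)
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	else
		I915_WRITE(reg, dpll);	/* rewrite to latch the pixel multiplier */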
Cc: Imre Deak Cc: Jani Nikula Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 100 +++++++++++++---------------------- 1 file changed, 36 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4e451faf96c6..1a8a01bc28a4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1331,32 +1331,48 @@ static void vlv_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) udelay(150); /* wait for warmup */ } -static void i9xx_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void i9xx_enable_pll(struct intel_crtc *crtc) { - int reg; - u32 val; + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int reg = DPLL(crtc->pipe); + u32 dpll = crtc->config.dpll_hw_state.dpll; - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, crtc->pipe); /* No really, not for ILK+ */ BUG_ON(dev_priv->info->gen >= 5); /* PLL is protected by panel, make sure we can write it */ - if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) - assert_panel_unlocked(dev_priv, pipe); + if (IS_MOBILE(dev) && !IS_I830(dev)) + assert_panel_unlocked(dev_priv, crtc->pipe); - reg = DPLL(pipe); - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); + udelay(150); + + if (INTEL_INFO(dev)->gen >= 4) { + I915_WRITE(DPLL_MD(crtc->pipe), + crtc->config.dpll_hw_state.dpll_md); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(reg, dpll); + } /* We do this three times for luck */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ } @@ -3646,7 +3662,11 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - i9xx_enable_pll(dev_priv, pipe); + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + + i9xx_enable_pll(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -4488,8 +4508,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; - int pipe = crtc->pipe; u32 dpll; bool is_sdvo; struct dpll *clock = &crtc->config.dpll; @@ -4553,37 +4571,14 @@ static void i9xx_update_pll(struct intel_crtc *crtc, dpll |= DPLL_VCO_ENABLE; crtc->config.dpll_hw_state.dpll = dpll; - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(DPLL(pipe)); - udelay(150); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - - if (crtc->config.has_dp_encoder) - intel_dp_set_m_n(crtc); - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(DPLL(pipe)); - udelay(150); - if (INTEL_INFO(dev)->gen >= 4) { u32 dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; crtc->config.dpll_hw_state.dpll_md = dpll_md; - - I915_WRITE(DPLL_MD(pipe), dpll_md); - } else { - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(DPLL(pipe), dpll); } + + if (crtc->config.has_dp_encoder) + intel_dp_set_m_n(crtc); } static void i8xx_update_pll(struct intel_crtc *crtc, @@ -4592,8 +4587,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; - int pipe = crtc->pipe; u32 dpll; struct dpll *clock = &crtc->config.dpll; @@ -4620,27 +4613,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= DPLL_VCO_ENABLE; crtc->config.dpll_hw_state.dpll = dpll; - - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(DPLL(pipe)); - udelay(150); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(DPLL(pipe), dpll); } static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) -- cgit v1.2.3 From f6736a1a7b846d0af90135c7a7f121ab3ada9ee1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:30 +0200 Subject: drm/i915: s/pre_pll/pre/ on the lvds port enable function i9xx doesn't use pre_enable at all, so we can fold this in now. Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 13 +++---------- drivers/gpu/drm/i915/intel_lvds.c | 4 ++-- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1a8a01bc28a4..0497841f841e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3214,12 +3214,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(dev); - for_each_encoder_on_crtc(dev, crtc, encoder) { - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); + for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); - } if (intel_crtc->config.has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -3662,16 +3659,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - - i9xx_enable_pll(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); + i9xx_enable_pll(intel_crtc); + i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a510fa824027..b0e1088b2c97 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -115,7 +115,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, * This is an exception to the general rule that mode_set doesn't turn * things on. 
*/ -static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) +static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -946,7 +946,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_ENCODER_LVDS); intel_encoder->enable = intel_enable_lvds; - intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; + intel_encoder->pre_enable = intel_pre_enable_lvds; intel_encoder->compute_config = intel_lvds_compute_config; intel_encoder->disable = intel_disable_lvds; intel_encoder->get_hw_state = intel_lvds_get_hw_state; -- cgit v1.2.3 From 165e901caa4c9d768dd572aab6b95f89a2e9e204 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 26 Jun 2013 17:44:15 +0300 Subject: drm/i915: Mask out hardware status bits from VLV DPLL register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DPLL lock bit, and the DPIO phy status bits are read-only and controlled by the hardware, so they will never be set by the driver. Mask them out when reading the hw state, so that the state comparison won't fail. Signed-off-by: Ville Syrjälä Reviewed-by: Jesse Barnes [danvet: Jesse asked for a code comment and I wholeheartly agree, so added one.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0497841f841e..d60684d9847c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4958,6 +4958,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!IS_VALLEYVIEW(dev)) { pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); + } else { + /* Mask out read-only status bits. */ + pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | + DPLL_PORTC_READY_MASK | + DPLL_PORTB_READY_MASK); } return true; -- cgit v1.2.3 From 5476f8505b4c4178dbb9f4e9d2bf17e52d8026ed Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:02 -0700 Subject: drm/i915: Remove extra error state NULL Not only was there an extra, but since we now kzalloc the error state, we don't need either. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 92e78167ca84..6a1ae614268e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1931,10 +1931,6 @@ static void i915_capture_error_state(struct drm_device *dev) i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); - /* Record buffers on the active and pinned lists. */ - error->active_bo = NULL; - error->pinned_bo = NULL; - i = 0; list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; @@ -1944,8 +1940,6 @@ static void i915_capture_error_state(struct drm_device *dev) i++; error->pinned_bo_count = i - error->active_bo_count; - error->active_bo = NULL; - error->pinned_bo = NULL; if (i) { error->active_bo = kmalloc(sizeof(*error->active_bo)*i, GFP_ATOMIC); -- cgit v1.2.3 From 26b7c22465cbfaa40d7f2de6d5933a66106eb778 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:03 -0700 Subject: drm/i915: Extract error buffer capture This helps when we have per VM buffer capturing. 
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 69 +++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6a1ae614268e..6ca6097aa48f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1855,6 +1855,42 @@ static void i915_gem_record_rings(struct drm_device *dev, } } +static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct drm_i915_gem_object *obj; + int i; + + i = 0; + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) + i++; + error->active_bo_count = i; + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + if (obj->pin_count) + i++; + error->pinned_bo_count = i - error->active_bo_count; + + if (i) { + error->active_bo = kmalloc(sizeof(*error->active_bo)*i, + GFP_ATOMIC); + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; + } + + if (error->active_bo) + error->active_bo_count = + capture_active_bo(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_pinned_bo(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.bound_list); +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -1867,10 +1903,9 @@ static void i915_gem_record_rings(struct drm_device *dev, static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; unsigned long flags; - int i, pipe; + int pipe; spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); error = dev_priv->gpu_error.first_error; @@ -1928,38 +1963,10 @@ static void i915_capture_error_state(struct drm_device *dev) i915_get_extra_instdone(dev, error->extra_instdone); + i915_gem_capture_buffers(dev_priv, error); i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); - i = 0; - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) - i++; - error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (obj->pin_count) - i++; - error->pinned_bo_count = i - error->active_bo_count; - - if (i) { - error->active_bo = kmalloc(sizeof(*error->active_bo)*i, - GFP_ATOMIC); - if (error->active_bo) - error->pinned_bo = - error->active_bo + error->active_bo_count; - } - - if (error->active_bo) - error->active_bo_count = - capture_active_bo(error->active_bo, - error->active_bo_count, - &dev_priv->mm.active_list); - - if (error->pinned_bo) - error->pinned_bo_count = - capture_pinned_bo(error->pinned_bo, - error->pinned_bo_count, - &dev_priv->mm.bound_list); - do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); -- cgit v1.2.3 From 6670a5a5c77b8fc17962742f9bcf6f47e489aa62 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:04 -0700 Subject: drm/i915: make PDE|PTE platform specific Nothing outside of i915_gem_gtt.c and more specifically, the relevant gen specific init function should need to know about number of PDEs, or PTEs per PD. Exposing this will only lead to circumventing using the upcoming VM abstraction. To accomplish this, move the defines into the .c file, rename the PDE define to be GEN6, and make the PTE count less of a magic number. 
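For the record, the arithmetic that replaces the magic number, assuming the usual 4 KiB pages and 4-byte gen6 PTEs (both sizes are assumptions of this sketch, not spelled out in the patch):

/*
 * I915_PPGTT_PT_ENTRIES = PAGE_SIZE / sizeof(gen6_gtt_pte_t)
 *                       = 4096 / 4 = 1024 PTEs per page table
 *
 * With GEN6_PPGTT_PD_ENTRIES = 512 page-directory entries:
 *
 *   512 page tables * 1024 PTEs * 4 KiB per page = 2 GiB of PPGTT space
 */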
The remaining code in the global gtt setup is a bit messy, but an upcoming patch will clean that one up. v2: Don't hardcode number of PDEs (Daniel + Jesse) Reworded commit message to reflect change. Reviewed-by: Jesse Barnes Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem_gtt.c | 9 ++++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9c8b588e14d..91c1fdc6a4ab 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -486,8 +486,6 @@ struct i915_gtt { }; #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) -#define I915_PPGTT_PD_ENTRIES 512 -#define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { struct drm_device *dev; unsigned num_pd_entries; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5101ab6869b4..216e7a19e63d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -28,6 +28,9 @@ #include "i915_trace.h" #include "intel_drv.h" +#define GEN6_PPGTT_PD_ENTRIES 512 +#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) + /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -278,7 +281,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } else { ppgtt->pte_encode = gen6_pte_encode; } - ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; + ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; ppgtt->enable = gen6_ppgtt_enable; ppgtt->clear_range = gen6_ppgtt_clear_range; ppgtt->insert_entries = gen6_ppgtt_insert_entries; @@ -688,7 +691,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 7) { /* PPGTT pdes are stolen from global gtt ptes, so shrink the * aperture accordingly when using aliasing ppgtt. */ - gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); @@ -699,7 +702,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); drm_mm_takedown(&dev_priv->mm.gtt_space); - gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); } -- cgit v1.2.3 From 84f135605898708ab692fc84555c31fbfe2983c1 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:17 -0700 Subject: drm/i915: Really share scratch page A previous patch had set up the ppgtt and ggtt to use the same scratch page, but still kept around both pointers. Kill it, it's not needed and gets in our way for upcoming cleanups. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 91c1fdc6a4ab..af8eadf796a8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -492,7 +492,6 @@ struct i915_hw_ppgtt { struct page **pt_pages; uint32_t pd_offset; dma_addr_t *pt_dma_addr; - dma_addr_t scratch_page_dma_addr; /* pte functions, mirroring the interface of the global gtt. 
*/ void (*clear_range)(struct i915_hw_ppgtt *ppgtt, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 216e7a19e63d..46b9e32b1109 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -188,13 +188,14 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned first_entry, unsigned num_entries) { + struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; gen6_gtt_pte_t *pt_vaddr, scratch_pte; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; scratch_pte = ppgtt->pte_encode(ppgtt->dev, - ppgtt->scratch_page_dma_addr, + dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC); while (num_entries) { @@ -351,7 +352,6 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) return -ENOMEM; ppgtt->dev = dev; - ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); -- cgit v1.2.3 From 67167240063c9eff15d60754c8d786a7a237ffa2 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:18 -0700 Subject: drm/i915: Combine scratch members into a struct There isn't any special reason to do this other than it makes it obvious that the two members are connected. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 6 ++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 17 ++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af8eadf796a8..40bb23b3fa19 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -465,8 +465,10 @@ struct i915_gtt { void __iomem *gsm; bool do_idle_maps; - dma_addr_t scratch_page_dma; - struct page *scratch_page; + struct { + dma_addr_t addr; + struct page *page; + } scratch; /* global gtt ops */ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 46b9e32b1109..cdd7b45cd501 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -195,7 +195,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned last_pte, i; scratch_pte = ppgtt->pte_encode(ppgtt->dev, - dev_priv->gtt.scratch_page_dma, + dev_priv->gtt.scratch.addr, I915_CACHE_LLC); while (num_entries) { @@ -521,8 +521,7 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = dev_priv->gtt.pte_encode(dev, - dev_priv->gtt.scratch_page_dma, + scratch_pte = dev_priv->gtt.pte_encode(dev, dev_priv->gtt.scratch.addr, I915_CACHE_LLC); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); @@ -727,8 +726,8 @@ static int setup_scratch_page(struct drm_device *dev) #else dma_addr = page_to_phys(page); #endif - dev_priv->gtt.scratch_page = page; - dev_priv->gtt.scratch_page_dma = dma_addr; + dev_priv->gtt.scratch.page = page; + dev_priv->gtt.scratch.addr = dma_addr; return 0; } @@ -736,11 +735,11 @@ static int setup_scratch_page(struct drm_device *dev) static void teardown_scratch_page(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - set_pages_wb(dev_priv->gtt.scratch_page, 1); - pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, + set_pages_wb(dev_priv->gtt.scratch.page, 1); + pci_unmap_page(dev->pdev, 
dev_priv->gtt.scratch.addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(dev_priv->gtt.scratch_page); - __free_page(dev_priv->gtt.scratch_page); + put_page(dev_priv->gtt.scratch.page); + __free_page(dev_priv->gtt.scratch.page); } static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -- cgit v1.2.3 From 80a74f7f9c3e57123b6c3d314d4340fc8195a524 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:19 -0700 Subject: drm/i915: Drop dev from pte_encode The original pte_encode function needed the dev argument so we could do platform specific handling via IS_GENX, etc. With the merging of a pte encoding function there should never been a need to quirk away gen specific details. The patch doesn't do much but makes the upcoming reworks in gtt/ppgtt/mm slightly (albeit, ever so) easier. Reviewed-by: Kenneth Graunke Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 6 ++---- drivers/gpu/drm/i915/i915_gem_gtt.c | 21 ++++++++------------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 40bb23b3fa19..438cb0ab6889 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -482,8 +482,7 @@ struct i915_gtt { struct sg_table *st, unsigned int pg_start, enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, - dma_addr_t addr, + gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level); }; #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) @@ -503,8 +502,7 @@ struct i915_hw_ppgtt { struct sg_table *st, unsigned int pg_start, enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, - dma_addr_t addr, + gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level); int (*enable)(struct drm_device *dev); void (*cleanup)(struct i915_hw_ppgtt *ppgtt); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cdd7b45cd501..42b5a4fc3932 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -45,8 +45,7 @@ #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) -static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, - dma_addr_t addr, +static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; @@ -72,8 +71,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, #define BYT_PTE_WRITEABLE (1 << 1) #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) -static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, - dma_addr_t addr, +static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; @@ -90,8 +88,7 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, return pte; } -static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev, - dma_addr_t addr, +static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; @@ -194,8 +191,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = ppgtt->pte_encode(ppgtt->dev, - dev_priv->gtt.scratch.addr, + scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr, I915_CACHE_LLC); while (num_entries) { @@ -231,8 +227,7 @@ static void 
gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, dma_addr_t page_addr; page_addr = sg_page_iter_dma_address(&sg_iter); - pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr, - cache_level); + pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level); if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); act_pt++; @@ -483,7 +478,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(dev_priv->gtt.pte_encode(dev, addr, level), + iowrite32(dev_priv->gtt.pte_encode(addr, level), >t_entries[i]); i++; } @@ -496,7 +491,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, */ if (i != 0) WARN_ON(readl(>t_entries[i-1]) - != dev_priv->gtt.pte_encode(dev, addr, level)); + != dev_priv->gtt.pte_encode(addr, level)); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -521,7 +516,7 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = dev_priv->gtt.pte_encode(dev, dev_priv->gtt.scratch.addr, + scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr, I915_CACHE_LLC); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); -- cgit v1.2.3 From b2f21b4dfdd1e7396a99312c35092c8bb486a699 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:20 -0700 Subject: drm/i915: Use gtt shortform where possible Just for compactness. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 42b5a4fc3932..66929eac6367 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -846,34 +846,28 @@ int i915_gem_gtt_init(struct drm_device *dev) int ret; if (INTEL_INFO(dev)->gen <= 5) { - dev_priv->gtt.gtt_probe = i915_gmch_probe; - dev_priv->gtt.gtt_remove = i915_gmch_remove; + gtt->gtt_probe = i915_gmch_probe; + gtt->gtt_remove = i915_gmch_remove; } else { - dev_priv->gtt.gtt_probe = gen6_gmch_probe; - dev_priv->gtt.gtt_remove = gen6_gmch_remove; - if (IS_HASWELL(dev)) { - dev_priv->gtt.pte_encode = hsw_pte_encode; - } else if (IS_VALLEYVIEW(dev)) { - dev_priv->gtt.pte_encode = byt_pte_encode; - } else { - dev_priv->gtt.pte_encode = gen6_pte_encode; - } + gtt->gtt_probe = gen6_gmch_probe; + gtt->gtt_remove = gen6_gmch_remove; + if (IS_HASWELL(dev)) + gtt->pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(dev)) + gtt->pte_encode = byt_pte_encode; + else + gtt->pte_encode = gen6_pte_encode; } - ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, - &dev_priv->gtt.stolen_size, - >t->mappable_base, - >t->mappable_end); + ret = gtt->gtt_probe(dev, >t->total, >t->stolen_size, + >t->mappable_base, >t->mappable_end); if (ret) return ret; /* GMADR is the PCI mmio aperture into the global GTT. 
*/ - DRM_INFO("Memory usable by graphics device = %zdM\n", - dev_priv->gtt.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %ldM\n", - dev_priv->gtt.mappable_end >> 20); - DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", - dev_priv->gtt.stolen_size >> 20); + DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); return 0; } -- cgit v1.2.3 From 5c3fe8b03ea6eb61617edb390d51c08609a495f7 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:21 -0700 Subject: drm/i915: Move fbc members out of line Signed-off-by: Ben Widawsky [danvet: Resolve conflict with Damien's FBC_CHIP_DEFAULT no fbc reason.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 50 +++++++++++++++++++-------------- drivers/gpu/drm/i915/i915_gem_stolen.c | 20 ++++++------- drivers/gpu/drm/i915/intel_display.c | 6 ++-- drivers/gpu/drm/i915/intel_drv.h | 7 ----- drivers/gpu/drm/i915/intel_pm.c | 51 +++++++++++++++++----------------- 6 files changed, 69 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c16926ca15be..f82134f8e9fb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1492,7 +1492,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) seq_puts(m, "FBC enabled\n"); } else { seq_puts(m, "FBC disabled: "); - switch (dev_priv->no_fbc_reason) { + switch (dev_priv->fbc.no_fbc_reason) { case FBC_NO_OUTPUT: seq_puts(m, "no outputs"); break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 438cb0ab6889..d1db73a5ba0c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -528,18 +528,36 @@ struct i915_hw_context { struct i915_ctx_hang_stats hang_stats; }; -enum no_fbc_reason { - FBC_NO_OUTPUT, /* no outputs enabled to compress */ - FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ - FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ - FBC_MODE_TOO_LARGE, /* mode too large for compression */ - FBC_BAD_PLANE, /* fbc not supported on plane */ - FBC_NOT_TILED, /* buffer not tiled */ - FBC_MULTIPLE_PIPES, /* more than one pipe active */ - FBC_MODULE_PARAM, - FBC_CHIP_DEFAULT, /* disabled by default on this chip */ +struct i915_fbc { + unsigned long size; + unsigned int fb_id; + enum plane plane; + int y; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; + + struct intel_fbc_work { + struct delayed_work work; + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + int interval; + } *fbc_work; + + enum { + FBC_NO_OUTPUT, /* no outputs enabled to compress */ + FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */ + FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ + FBC_MODE_TOO_LARGE, /* mode too large for compression */ + FBC_BAD_PLANE, /* fbc not supported on plane */ + FBC_NOT_TILED, /* buffer not tiled */ + FBC_MULTIPLE_PIPES, /* more than one pipe active */ + FBC_MODULE_PARAM, + FBC_CHIP_DEFAULT, /* disabled by default on this chip */ + } no_fbc_reason; }; + enum intel_pch { PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ @@ -1059,12 +1077,7 @@ typedef struct drm_i915_private { int num_plane; - unsigned long cfb_size; - unsigned int cfb_fb; - enum plane cfb_plane; - int cfb_y; - struct intel_fbc_work *fbc_work; - + struct 
i915_fbc fbc; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1142,11 +1155,6 @@ typedef struct drm_i915_private { /* Haswell power well */ struct i915_power_well power_well; - enum no_fbc_reason no_fbc_reason; - - struct drm_mm_node *compressed_fb; - struct drm_mm_node *compressed_llb; - struct i915_gpu_error gpu_error; struct drm_i915_gem_object *vlv_pctx; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index f713294618fe..8e023447b7eb 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -120,7 +120,7 @@ static int i915_setup_compression(struct drm_device *dev, int size) if (!compressed_llb) goto err_fb; - dev_priv->compressed_llb = compressed_llb; + dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, dev_priv->mm.stolen_base + compressed_fb->start); @@ -128,8 +128,8 @@ static int i915_setup_compression(struct drm_device *dev, int size) dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->compressed_fb = compressed_fb; - dev_priv->cfb_size = size; + dev_priv->fbc.compressed_fb = compressed_fb; + dev_priv->fbc.size = size; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); @@ -150,7 +150,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) if (dev_priv->mm.stolen_base == 0) return -ENODEV; - if (size < dev_priv->cfb_size) + if (size < dev_priv->fbc.size) return 0; /* Release any current block */ @@ -163,16 +163,16 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->cfb_size == 0) + if (dev_priv->fbc.size == 0) return; - if (dev_priv->compressed_fb) - drm_mm_put_block(dev_priv->compressed_fb); + if (dev_priv->fbc.compressed_fb) + drm_mm_put_block(dev_priv->fbc.compressed_fb); - if (dev_priv->compressed_llb) - drm_mm_put_block(dev_priv->compressed_llb); + if (dev_priv->fbc.compressed_llb) + drm_mm_put_block(dev_priv->fbc.compressed_llb); - dev_priv->cfb_size = 0; + dev_priv->fbc.size = 0; } void i915_gem_cleanup_stolen(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d60684d9847c..112120e909ee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3408,7 +3408,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - if (dev_priv->cfb_plane == plane) + if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); intel_crtc_update_cursor(crtc, false); @@ -3481,7 +3481,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) drm_vblank_off(dev, pipe); /* FBC must be disabled before disabling the plane on HSW. 
*/ - if (dev_priv->cfb_plane == plane) + if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); hsw_disable_ips(intel_crtc); @@ -3720,7 +3720,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - if (dev_priv->cfb_plane == plane) + if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); intel_crtc_dpms_overlay(intel_crtc, false); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6f7f33e70e91..5dfc1a0f2351 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -549,13 +549,6 @@ struct intel_unpin_work { bool enable_stall_check; }; -struct intel_fbc_work { - struct delayed_work work; - struct drm_crtc *crtc; - struct drm_framebuffer *fb; - int interval; -}; - int intel_pch_rawclk(struct drm_device *dev); int intel_connector_update_modes(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index aa48fc6ba4db..5b4ade682bd6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -87,7 +87,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) int plane, i; u32 fbc_ctl, fbc_ctl2; - cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; + cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; if (fb->pitches[0] < cfb_pitch) cfb_pitch = fb->pitches[0]; @@ -326,7 +326,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) struct drm_i915_private *dev_priv = dev->dev_private; mutex_lock(&dev->struct_mutex); - if (work == dev_priv->fbc_work) { + if (work == dev_priv->fbc.fbc_work) { /* Double check that we haven't switched fb without cancelling * the prior work. */ @@ -334,12 +334,12 @@ static void intel_fbc_work_fn(struct work_struct *__work) dev_priv->display.enable_fbc(work->crtc, work->interval); - dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; - dev_priv->cfb_fb = work->crtc->fb->base.id; - dev_priv->cfb_y = work->crtc->y; + dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; + dev_priv->fbc.fb_id = work->crtc->fb->base.id; + dev_priv->fbc.y = work->crtc->y; } - dev_priv->fbc_work = NULL; + dev_priv->fbc.fbc_work = NULL; } mutex_unlock(&dev->struct_mutex); @@ -348,25 +348,25 @@ static void intel_fbc_work_fn(struct work_struct *__work) static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) { - if (dev_priv->fbc_work == NULL) + if (dev_priv->fbc.fbc_work == NULL) return; DRM_DEBUG_KMS("cancelling pending FBC enable\n"); /* Synchronisation is provided by struct_mutex and checking of - * dev_priv->fbc_work, so we can perform the cancellation + * dev_priv->fbc.fbc_work, so we can perform the cancellation * entirely asynchronously. */ - if (cancel_delayed_work(&dev_priv->fbc_work->work)) + if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) /* tasklet was killed before being run, clean up */ - kfree(dev_priv->fbc_work); + kfree(dev_priv->fbc.fbc_work); /* Mark the work as no longer wanted so that if it does * wake-up (because the work was already running and waiting * for our mutex), it will discover that is no longer * necessary to run. 
*/ - dev_priv->fbc_work = NULL; + dev_priv->fbc.fbc_work = NULL; } static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) @@ -392,7 +392,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) work->interval = interval; INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); - dev_priv->fbc_work = work; + dev_priv->fbc.fbc_work = work; /* Delay the actual enabling to let pageflipping cease and the * display to settle before starting the compression. Note that @@ -418,7 +418,7 @@ void intel_disable_fbc(struct drm_device *dev) return; dev_priv->display.disable_fbc(dev); - dev_priv->cfb_plane = -1; + dev_priv->fbc.plane = -1; } /** @@ -470,7 +470,8 @@ void intel_update_fbc(struct drm_device *dev) !to_intel_crtc(tmp_crtc)->primary_disabled) { if (crtc) { DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + dev_priv->fbc.no_fbc_reason = + FBC_MULTIPLE_PIPES; goto out_disable; } crtc = tmp_crtc; @@ -479,7 +480,7 @@ void intel_update_fbc(struct drm_device *dev) if (!crtc || crtc->fb == NULL) { DRM_DEBUG_KMS("no output, disabling\n"); - dev_priv->no_fbc_reason = FBC_NO_OUTPUT; + dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT; goto out_disable; } @@ -491,19 +492,19 @@ void intel_update_fbc(struct drm_device *dev) if (i915_enable_fbc < 0 && INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { DRM_DEBUG_KMS("disabled per chip default\n"); - dev_priv->no_fbc_reason = FBC_CHIP_DEFAULT; + dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT; goto out_disable; } if (!i915_enable_fbc) { DRM_DEBUG_KMS("fbc disabled per module param\n"); - dev_priv->no_fbc_reason = FBC_MODULE_PARAM; + dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM; goto out_disable; } if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { DRM_DEBUG_KMS("mode incompatible with compression, " "disabling\n"); - dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } @@ -517,13 +518,13 @@ void intel_update_fbc(struct drm_device *dev) if ((crtc->mode.hdisplay > max_hdisplay) || (crtc->mode.vdisplay > max_vdisplay)) { DRM_DEBUG_KMS("mode too large for compression, disabling\n"); - dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE; goto out_disable; } if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && intel_crtc->plane != 0) { DRM_DEBUG_KMS("plane not 0, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_BAD_PLANE; + dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE; goto out_disable; } @@ -533,7 +534,7 @@ void intel_update_fbc(struct drm_device *dev) if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_NOT_TILED; + dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED; goto out_disable; } @@ -543,7 +544,7 @@ void intel_update_fbc(struct drm_device *dev) if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL; goto out_disable; } @@ -552,9 +553,9 @@ void intel_update_fbc(struct drm_device *dev) * cannot be unpinned (and have its GTT offset and fence revoked) * without first being decoupled from the scanout and FBC disabled. 
*/ - if (dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_fb == fb->base.id && - dev_priv->cfb_y == crtc->y) + if (dev_priv->fbc.plane == intel_crtc->plane && + dev_priv->fbc.fb_id == fb->base.id && + dev_priv->fbc.y == crtc->y) return; if (intel_fbc_enabled(dev)) { -- cgit v1.2.3 From 911bdf0ae6405db3313c6e5798cf08640fdd0714 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 27 Jun 2013 16:30:23 -0700 Subject: drm/i915: Move gtt_mtrr to i915_gtt for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/mm.gtt_mtrr/gtt.mtrr/" $file; done Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index adb319b53ecd..0e2214236083 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1558,8 +1558,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_rmmap; } - dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, - aperture_size); + dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, + aperture_size); /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed @@ -1667,7 +1667,7 @@ out_gem_unload: intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); out_mtrrfree: - arch_phys_wc_del(dev_priv->mm.gtt_mtrr); + arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); dev_priv->gtt.gtt_remove(dev); out_rmmap: @@ -1705,7 +1705,7 @@ int i915_driver_unload(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->gtt.mappable); - arch_phys_wc_del(dev_priv->mm.gtt_mtrr); + arch_phys_wc_del(dev_priv->gtt.mtrr); acpi_video_unregister(); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d1db73a5ba0c..da07f6f28854 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -470,6 +470,8 @@ struct i915_gtt { struct page *page; } scratch; + int mtrr; + /* global gtt ops */ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, size_t *stolen, phys_addr_t *mappable_base, @@ -834,8 +836,6 @@ struct i915_gem_mm { /** Usable portion of the GTT for GEM */ unsigned long stolen_base; /* limited to low memory (32-bit) */ - int gtt_mtrr; - /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; -- cgit v1.2.3 From c93f54cf7de31d44b4036d0d1e291172b2bd5743 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 27 Jun 2013 19:47:19 +0200 Subject: drm/i915: pixel multiplier readout support for pch ports Now that we painstakingly track the shared pch dplls we can finally implement pixel mutliplier readout support for pch ports, too. v2: Undo the temporary hack to disable the sdvo pixel multiplier cross-checking. 
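The readout in the diff below recovers the multiplier from a bitfield of the saved DPLL value, where the hardware stores multiplier minus one. A generic sketch of that decode; the mask and shift used here are placeholders, the real definitions live in the register headers:

/* Illustrative decode of a "multiplier minus one" register field, as the
 * readout does with PLL_REF_SDVO_HDMI_MULTIPLIER_MASK/SHIFT.  The field
 * position chosen below is an assumption for the demo, not the real one. */
#include <stdint.h>
#include <stdio.h>

static unsigned int decode_pixel_multiplier(uint32_t dpll,
					    uint32_t mask, unsigned int shift)
{
	return ((dpll & mask) >> shift) + 1;	/* hardware encodes multiplier - 1 */
}

int main(void)
{
	/* Pretend the field occupies two bits starting at bit 9. */
	const uint32_t demo_mask = 0x3u << 9;
	const unsigned int demo_shift = 9;

	printf("field 0 -> x%u\n", decode_pixel_multiplier(0x0u << 9, demo_mask, demo_shift));
	printf("field 1 -> x%u\n", decode_pixel_multiplier(0x1u << 9, demo_mask, demo_shift));
	return 0;
}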
Cc: Imre Deak Cc: Jesse Barnes Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 12 ++++++------ drivers/gpu/drm/i915/intel_sdvo.c | 3 --- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 112120e909ee..65e8f5e512d2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5815,10 +5815,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); - /* XXX: Can't properly read out the pch dpll pixel multiplier - * since we don't have state tracking for pch clocks yet. */ - pipe_config->pixel_multiplier = 1; - if (HAS_PCH_IBX(dev_priv->dev)) { pipe_config->shared_dpll = crtc->pipe; } else { @@ -5833,6 +5829,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, WARN_ON(!pll->get_hw_state(dev_priv, pll, &pipe_config->dpll_hw_state)); + + tmp = pipe_config->dpll_hw_state.dpll; + pipe_config->pixel_multiplier = + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; } else { pipe_config->pixel_multiplier = 1; } @@ -8083,8 +8084,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); - if (!HAS_PCH_SPLIT(dev)) - PIPE_CONF_CHECK_I(pixel_multiplier); + PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, DRM_MODE_FLAG_INTERLACE); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2628d5622449..8415d6a610dd 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1370,9 +1370,6 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, break; } - if(HAS_PCH_SPLIT(dev)) - return; /* no pixel multiplier readout support yet */ - WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", pipe_config->pixel_multiplier, encoder_pixel_multiplier); -- cgit v1.2.3 From 2385bdf0787aef45ee1847b8508a417433da7e14 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 26 Jun 2013 01:38:15 +0300 Subject: drm/i915: add fastboot param for fast & loose mode setting Handling all the state properly for fastboot is still not yet done by far, but we need some way to be able to test what we currently have. So hide the not-yet-quite-complete stuff behind a module option. Signed-off-by: Jesse Barnes [danvet: Add a real commit message.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 062cbda1bf4a..33cb97388fc9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -132,6 +132,11 @@ int i915_enable_ips __read_mostly = 1; module_param_named(enable_ips, i915_enable_ips, int, 0600); MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +bool i915_fastboot __read_mostly = 0; +module_param_named(fastboot, i915_fastboot, bool, 0600); +MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " + "(default: false)"); + static struct drm_driver driver; extern int intel_agp_enabled; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index da07f6f28854..4a23e957b85c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1551,6 +1551,7 @@ extern int i915_enable_ppgtt __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; +extern bool i915_fastboot __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); -- cgit v1.2.3 From f1f644dc66cbaf5a4c7dcde683361536b41885b9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 27 Jun 2013 00:39:25 +0300 Subject: drm/i915: get mode clock when reading the pipe config v9 We need this for comparing modes between configuration changes. The tricky part is to allow us to reuse the new get_clock stuff to recover the lvds clock on gen2/3 when neither the vbt has an lvds mode nor the panel a (useful) EDID. v2: try harder to calulate non-simple pixel clocks (Daniel) call get_clock after getting the encoder config, needed for pixel multiply (Jesse) v3: drop get_clock now that the pixel_multiply has been moved into get_pipe_config v4: re-add get_clock; we need to get the pixel multiplier in the encoder, so need to calculate the clock value after the encoder's get_config is called v5: drop hsw clock_get, still needs to be written v6: add fuzzy clock check (Daniel) v7: wrap fuzzy clock check under !IS_HASWELL use port_clock field rather than a new CPU eDP clock field in crtc_config v8: remove stale pixel_multiplier sets (Daniel) multiply by pixel_multiplier in 9xx clock get too (Daniel) v9: make sure we set pixel_multiplier before calling clock_get from mode_get for LVDS (Daniel) Signed-off-by: Jesse Barnes [danvet: Add some explanation to the commit message about why we have to jump through a few hoops. Also remove the rebase-fail hunk from intel_sdvo.c] [danvet: Squash in the fixup from Jesse to also call ->get_clock in the modeset state checker.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 126 ++++++++++++++++++++++++++++++++--- drivers/gpu/drm/i915/intel_dp.c | 7 ++ 3 files changed, 123 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4a23e957b85c..810c9fab525e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -367,6 +367,7 @@ struct drm_i915_display_funcs { * fills out the pipe-config with the hw state. 
*/ bool (*get_pipe_config)(struct intel_crtc *, struct intel_crtc_config *); + void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *); int (*crtc_mode_set)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 65e8f5e512d2..9c05e57fb7ae 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -45,6 +45,11 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); +static void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); +static void ironlake_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); + typedef struct { int min, max; } intel_range_t; @@ -6853,11 +6858,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, } /* Returns the clock of the currently programmed mode of the given pipe. */ -static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) +static void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) { + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + int pipe = pipe_config->cpu_transcoder; u32 dpll = I915_READ(DPLL(pipe)); u32 fp; intel_clock_t clock; @@ -6896,7 +6902,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) default: DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " "mode\n", (int)(dpll & DPLL_MODE_MASK)); - return 0; + pipe_config->adjusted_mode.clock = 0; + return; } if (IS_PINEVIEW(dev)) @@ -6933,12 +6940,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) } } - /* XXX: It would be nice to validate the clocks, but we can't reuse - * i830PllIsValid() because it relies on the xf86_config connector - * configuration being accurate, which it isn't necessarily. + pipe_config->adjusted_mode.clock = clock.dot * + pipe_config->pixel_multiplier; +} + +static void ironlake_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + int link_freq, repeat; + u64 clock; + u32 link_m, link_n; + + repeat = pipe_config->pixel_multiplier; + + /* + * The calculation for the data clock is: + * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp + * But we want to avoid losing precison if possible, so: + * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp)) + * + * and the link clock is simpler: + * link_clock = (m * link_clock * repeat) / n + */ + + /* + * We need to get the FDI or DP link clock here to derive + * the M/N dividers. + * + * For FDI, we read it from the BIOS or use a fixed 2.7GHz. + * For DP, it's either 1.62GHz or 2.7GHz. + * We do our calculations in 10*MHz since we don't need much precison. 
*/ + if (pipe_config->has_pch_encoder) + link_freq = intel_fdi_link_freq(dev) * 10000; + else + link_freq = pipe_config->port_clock; + + link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder)); + link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder)); + + if (!link_m || !link_n) + return; - return clock.dot; + clock = ((u64)link_m * (u64)link_freq * (u64)repeat); + do_div(clock, link_n); + + pipe_config->adjusted_mode.clock = clock; } /** Returns the currently programmed mode of the given pipe. */ @@ -6949,6 +6999,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; struct drm_display_mode *mode; + struct intel_crtc_config pipe_config; int htot = I915_READ(HTOTAL(cpu_transcoder)); int hsync = I915_READ(HSYNC(cpu_transcoder)); int vtot = I915_READ(VTOTAL(cpu_transcoder)); @@ -6958,7 +7009,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, if (!mode) return NULL; - mode->clock = intel_crtc_clock_get(dev, crtc); + /* + * Construct a pipe_config sufficient for getting the clock info + * back out of crtc_clock_get. + * + * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need + * to use a real value here instead. + */ + pipe_config.cpu_transcoder = intel_crtc->pipe; + pipe_config.pixel_multiplier = 1; + i9xx_crtc_clock_get(intel_crtc, &pipe_config); + + mode->clock = pipe_config.adjusted_mode.clock; mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; @@ -8019,6 +8081,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) } +static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, + struct intel_crtc_config *new) +{ + int clock1, clock2, diff; + + clock1 = cur->adjusted_mode.clock; + clock2 = new->adjusted_mode.clock; + + if (clock1 == clock2) + return true; + + if (!clock1 || !clock2) + return false; + + diff = abs(clock1 - clock2); + + if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) + return true; + + return false; +} + #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ list_for_each_entry((intel_crtc), \ &(dev)->mode_config.crtc_list, \ @@ -8124,6 +8208,15 @@ intel_pipe_config_compare(struct drm_device *dev, #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_QUIRK + if (!IS_HASWELL(dev)) { + if (!intel_fuzzy_clock_check(current_config, pipe_config)) { + DRM_ERROR("mismatch in clock (expected %d, found %d\n", + current_config->adjusted_mode.clock, + pipe_config->adjusted_mode.clock); + return false; + } + } + return true; } @@ -8249,8 +8342,12 @@ check_crtc_state(struct drm_device *dev) base.head) { if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config) + if (encoder->get_config && + dev_priv->display.get_clock) { encoder->get_config(encoder, &pipe_config); + dev_priv->display.get_clock(crtc, + &pipe_config); + } } WARN(crtc->active != active, @@ -9253,6 +9350,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = ironlake_update_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.get_clock = ironlake_crtc_clock_get; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; @@ -9260,6 +9358,7 @@ static void intel_init_display(struct drm_device *dev) 
dev_priv->display.update_plane = ironlake_update_plane; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9267,6 +9366,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = i9xx_update_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9813,8 +9913,12 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; - if (encoder->get_config) + if (encoder->get_config && + dev_priv->display.get_clock) { encoder->get_config(encoder, &crtc->config); + dev_priv->display.get_clock(crtc, + &crtc->config); + } } else { encoder->base.crtc = NULL; } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b73971234013..11eb697dec01 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1355,6 +1355,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } pipe_config->adjusted_mode.flags |= flags; + + if (dp_to_dig_port(intel_dp)->port == PORT_A) { + if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) + pipe_config->port_clock = 162000; + else + pipe_config->port_clock = 270000; + } } static void intel_disable_dp(struct intel_encoder *encoder) -- cgit v1.2.3 From babea61dfb82b4bdfdbc57ebf081ef6c16ffd524 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 26 Jun 2013 18:57:38 +0300 Subject: drm/i915: copy fetched mode state into crtc at setup_hw time v5 We already fetch and track other state into the main CRTC and encoder structs, and for fastboot we need to do the same with the mode and clock data we read out. 
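A clock derived from hardware dividers rarely matches the computed value bit for bit, which is why the state checker introduced above compares clocks fuzzily: they count as matching while their difference stays below roughly five percent of their sum. A standalone model of that comparison:

/* Userspace model of the fuzzy clock comparison: two clocks "match" when
 * their difference is below ~5% of their sum, using the same integer
 * expression as the kernel check. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;
	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);
	return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}

int main(void)
{
	printf("148500 vs 148350: %s\n", fuzzy_clock_check(148500, 148350) ? "match" : "mismatch");
	printf("148500 vs 108000: %s\n", fuzzy_clock_check(148500, 108000) ? "match" : "mismatch");
	return 0;
}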
v2: fix debug print v3: use fastboot param around state copy v4: set clock and flags for crtc here instead of in setup_hw_state v5: rename function to intel_crtc_mode_from_pipe_config for consistency (Chris) Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 37 ++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9c05e57fb7ae..2e6fc4ce8ec9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4716,6 +4716,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; } +static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_crtc *crtc = &intel_crtc->base; + + crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; + crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal; + crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; + crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; + + crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; + crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal; + crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; + crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; + + crtc->mode.flags = pipe_config->adjusted_mode.flags; + + crtc->mode.clock = pipe_config->adjusted_mode.clock; + crtc->mode.flags |= pipe_config->adjusted_mode.flags; +} + static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; @@ -9961,6 +9982,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_readout_hw_state(dev); + /* + * Now that we have the config, copy it to each CRTC struct + * Note that this could go away if we move to using crtc_config + * checking everywhere. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (crtc->active && i915_fastboot) { + intel_crtc_mode_from_pipe_config(crtc, &crtc->config); + + DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", + crtc->base.base.id); + drm_mode_debug_printmodeline(&crtc->base.mode); + } + } + /* HW state is read out, now we need to sanitize this mess. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { -- cgit v1.2.3 From 4d6a3e63bce0cb604864e36585ca8983160a421a Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 26 Jun 2013 01:38:18 +0300 Subject: drm/i915: turn off panel fitting at flip time if needed v2 Need better pfit tracking to do this right. 
v2: use fastboot param around this hack Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2e6fc4ce8ec9..6c0014d84812 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2221,6 +2221,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } + /* Update pipe size and adjust fitter if needed */ + if (i915_fastboot) { + I915_WRITE(PIPESRC(intel_crtc->pipe), + ((crtc->mode.hdisplay - 1) << 16) | + (crtc->mode.vdisplay - 1)); + if (!intel_crtc->config.pch_pfit.size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + I915_WRITE(PF_CTL(intel_crtc->pipe), 0); + I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); + I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); + } + } + ret = dev_priv->display.update_plane(crtc, fb, x, y); if (ret) { intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); -- cgit v1.2.3 From 319d9827ebb55d58d1b02d8a4eba48bbb2702376 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 26 Jun 2013 01:38:19 +0300 Subject: drm/i915: flip on a no fb -> fb transition if crtc is active v3 If the crtc is active, we can simply flip a new fb onto it, provided the other mode setting reqs are met. Otherwise, we'll need to do a full mode set to re-enable the crtc. v2: check for crtc active and set mode_changed accordingly v3: add module parameter, i915.fastboot, to control no fb -> fb flip behavior Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6c0014d84812..2ab34e57ba4a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8676,8 +8676,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, } else if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->fb == NULL) { - DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); - config->mode_changed = true; + struct intel_crtc *intel_crtc = + to_intel_crtc(set->crtc); + + if (intel_crtc->active && i915_fastboot) { + DRM_DEBUG_KMS("crtc has no fb, will flip\n"); + config->fb_changed = true; + } else { + DRM_DEBUG_KMS("inactive crtc, full mode set\n"); + config->mode_changed = true; + } } else if (set->fb == NULL) { config->mode_changed = true; } else if (set->fb->pixel_format != -- cgit v1.2.3 From fc16b48be665d94337a861486dd25499971742a2 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 6 Jun 2013 15:18:39 +0300 Subject: drm/i915: export error state to string conversion In preparation for accessing error state from sysfs, export error state to string conversion function. Also tuck buffer error handling inside the function. 
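The tucked-in handling surfaces the buffer's sticky error code only when nothing was written at all; partial output still wins. A small userspace model of such a sticky-error buffer (types and sizes are invented for the sketch):

/* Sketch of a string buffer with a sticky error: writes after a failure
 * are dropped, and the error is reported only if no bytes made it out,
 * mirroring the "if (m->bytes == 0 && m->err) return m->err" rule. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct sketch_buf {
	char data[64];
	size_t bytes;
	int err;				/* first failure, kept sticky */
};

static void sketch_puts(struct sketch_buf *b, const char *s)
{
	size_t len = strlen(s);

	if (b->err)
		return;				/* already failed, drop silently */
	if (b->bytes + len >= sizeof(b->data)) {
		b->err = -ENOSPC;		/* remember the failure */
		return;
	}
	memcpy(b->data + b->bytes, s, len);
	b->bytes += len;
	b->data[b->bytes] = '\0';
}

static int sketch_finish(const struct sketch_buf *b)
{
	if (b->bytes == 0 && b->err)		/* nothing written: report the error */
		return b->err;
	return 0;				/* partial output is still output */
}

int main(void)
{
	struct sketch_buf b = { .bytes = 0, .err = 0 };

	sketch_puts(&b, "ring 0 HEAD: 0x1234\n");
	printf("finish -> %d, captured %zu bytes\n", sketch_finish(&b), b.bytes);
	return 0;
}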
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 24 ++++++++---------------- drivers/gpu/drm/i915/i915_drv.h | 7 +++++++ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f82134f8e9fb..b64af15eb388 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -830,15 +830,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); } -struct i915_error_state_file_priv { - struct drm_device *dev; - struct drm_i915_error_state *error; -}; - - -static int i915_error_state(struct i915_error_state_file_priv *error_priv, - struct drm_i915_error_state_buf *m) - +int i915_error_state_to_str(struct drm_i915_error_state_buf *m, + const struct i915_error_state_file_priv *error_priv) { struct drm_device *dev = error_priv->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -848,7 +841,7 @@ static int i915_error_state(struct i915_error_state_file_priv *error_priv, if (!error) { err_printf(m, "no error state collected\n"); - return 0; + goto out; } err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, @@ -958,6 +951,10 @@ static int i915_error_state(struct i915_error_state_file_priv *error_priv, if (error->display) intel_display_print_error_state(m, dev, error->display); +out: + if (m->bytes == 0 && m->err) + return m->err; + return 0; } @@ -1051,15 +1048,10 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, error_str.start = *pos; - ret = i915_error_state(error_priv, &error_str); + ret = i915_error_state_to_str(&error_str, error_priv); if (ret) goto out; - if (error_str.bytes == 0 && error_str.err) { - ret = error_str.err; - goto out; - } - ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, error_str.buf, error_str.bytes); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 810c9fab525e..52fe9763b969 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -916,6 +916,11 @@ struct drm_i915_error_state_buf { loff_t pos; }; +struct i915_error_state_file_priv { + struct drm_device *dev; + struct drm_i915_error_state *error; +}; + struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ @@ -1921,6 +1926,8 @@ int i915_debugfs_init(struct drm_minor *minor); void i915_debugfs_cleanup(struct drm_minor *minor); __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); +int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, + const struct i915_error_state_file_priv *error); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); -- cgit v1.2.3 From 95d5bfb3ac4cf5d7311f496761506c676f6b6323 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 6 Jun 2013 15:18:40 +0300 Subject: drm/i915: export error state ref handling In preparation for sysfs error state access, export ref error state ref counting interface. 
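The exported get takes the reference under the same lock that guards the first_error slot, so a concurrent reset cannot free the object between the lookup and the kref_get. A simplified userspace model of that pattern (a plain counter and a mutex stand in for kref and the spinlock):

/* Userspace model of the get/put pair: pointer lookup and refcount bump
 * happen under the lock protecting the slot; the last put frees. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_error_state {
	int refs;
	const char *msg;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_error_state *first_error;

static struct sketch_error_state *error_state_get(void)
{
	struct sketch_error_state *e;

	pthread_mutex_lock(&slot_lock);
	e = first_error;
	if (e)
		e->refs++;			/* kref_get() in the driver */
	pthread_mutex_unlock(&slot_lock);
	return e;
}

static void error_state_put(struct sketch_error_state *e)
{
	int release;

	if (!e)
		return;
	pthread_mutex_lock(&slot_lock);
	release = (--e->refs == 0);		/* kref_put() would run a release callback */
	pthread_mutex_unlock(&slot_lock);
	if (release)
		free(e);
}

int main(void)
{
	first_error = calloc(1, sizeof(*first_error));
	first_error->refs = 1;			/* reference owned by the slot itself */
	first_error->msg = "GPU hung";

	struct sketch_error_state *e = error_state_get();
	printf("captured: %s (refs now %d)\n", e->msg, e->refs);
	error_state_put(e);

	error_state_put(first_error);		/* drop the slot's own reference */
	first_error = NULL;
	return 0;
}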
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 31 ++++++++++++++++++++++--------- drivers/gpu/drm/i915/i915_drv.h | 3 +++ 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b64af15eb388..eef4c01ab61a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -980,12 +980,30 @@ i915_error_state_write(struct file *filp, return cnt; } +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error_priv->error = dev_priv->gpu_error.first_error; + if (error_priv->error) + kref_get(&error_priv->error->ref); + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + +} + +void i915_error_state_put(struct i915_error_state_file_priv *error_priv) +{ + if (error_priv->error) + kref_put(&error_priv->error->ref, i915_error_state_free); +} + static int i915_error_state_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - drm_i915_private_t *dev_priv = dev->dev_private; struct i915_error_state_file_priv *error_priv; - unsigned long flags; error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); if (!error_priv) @@ -993,11 +1011,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file) error_priv->dev = dev; - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error_priv->error = dev_priv->gpu_error.first_error; - if (error_priv->error) - kref_get(&error_priv->error->ref); - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + i915_error_state_get(dev, error_priv); file->private_data = error_priv; @@ -1008,8 +1022,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file) { struct i915_error_state_file_priv *error_priv = file->private_data; - if (error_priv->error) - kref_put(&error_priv->error->ref, i915_error_state_free); + i915_error_state_put(error_priv); kfree(error_priv); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 52fe9763b969..bc4a84ac04cb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1928,6 +1928,9 @@ __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, const struct i915_error_state_file_priv *error); +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv); +void i915_error_state_put(struct i915_error_state_file_priv *error_priv); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); -- cgit v1.2.3 From 4dc955f7f5241a92767e2b3ffd74f49a82938999 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 6 Jun 2013 15:18:41 +0300 Subject: drm/i915: introduce i915_error_state_buf_init Make function for struct i915_error_state_buf initialization and export it, for sysfs and debugfs. 
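The new init helper keeps the existing allocation ladder: try to fit the whole request, fall back to a page, then to a minimal 128 bytes before giving up. A compact userspace model of that ladder (sizes mirror the kernel's, the allocator flags obviously do not):

/* Model of the three-step allocation fallback in the buf_init helper:
 * requested size first, then a page, then a minimal 128 bytes. */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096u

static void *alloc_error_buf(size_t requested, size_t *got)
{
	/* Order mirrors the kernel: generous, page-sized, minimal. */
	size_t tries[] = {
		requested + 1 > SKETCH_PAGE_SIZE ? requested + 1 : SKETCH_PAGE_SIZE,
		SKETCH_PAGE_SIZE,
		128,
	};

	for (size_t i = 0; i < sizeof(tries) / sizeof(tries[0]); i++) {
		void *buf = malloc(tries[i]);	/* first try uses __GFP_NORETRY | __GFP_NOWARN in the driver */
		if (buf) {
			*got = tries[i];
			return buf;
		}
	}
	*got = 0;
	return NULL;				/* -ENOMEM in the driver */
}

int main(void)
{
	size_t got;
	void *buf = alloc_error_buf(2 * SKETCH_PAGE_SIZE, &got);

	printf("got %zu bytes\n", got);
	free(buf);
	return 0;
}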
Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 50 ++++++++++++++++++++++--------------- drivers/gpu/drm/i915/i915_drv.h | 7 ++++++ 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index eef4c01ab61a..3e36756d0439 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1028,38 +1028,48 @@ static int i915_error_state_release(struct inode *inode, struct file *file) return 0; } -static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, - size_t count, loff_t *pos) +int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count, loff_t pos) { - struct i915_error_state_file_priv *error_priv = file->private_data; - struct drm_i915_error_state_buf error_str; - loff_t tmp_pos = 0; - ssize_t ret_count = 0; - int ret = 0; - - memset(&error_str, 0, sizeof(error_str)); + memset(ebuf, 0, sizeof(*ebuf)); /* We need to have enough room to store any i915_error_state printf * so that we can move it to start position. */ - error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; - error_str.buf = kmalloc(error_str.size, + ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); - if (error_str.buf == NULL) { - error_str.size = PAGE_SIZE; - error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); + if (ebuf->buf == NULL) { + ebuf->size = PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); } - if (error_str.buf == NULL) { - error_str.size = 128; - error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); + if (ebuf->buf == NULL) { + ebuf->size = 128; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); } - if (error_str.buf == NULL) + if (ebuf->buf == NULL) return -ENOMEM; - error_str.start = *pos; + ebuf->start = pos; + + return 0; +} + +static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, + size_t count, loff_t *pos) +{ + struct i915_error_state_file_priv *error_priv = file->private_data; + struct drm_i915_error_state_buf error_str; + loff_t tmp_pos = 0; + ssize_t ret_count = 0; + int ret; + + ret = i915_error_state_buf_init(&error_str, count, *pos); + if (ret) + return ret; ret = i915_error_state_to_str(&error_str, error_priv); if (ret) @@ -1074,7 +1084,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, else *pos = error_str.start + ret_count; out: - kfree(error_str.buf); + i915_error_state_buf_release(&error_str); return ret ?: ret_count; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc4a84ac04cb..a7c2cfb3ee97 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1931,6 +1931,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); +int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count, loff_t pos); +static inline void i915_error_state_buf_release( + struct drm_i915_error_state_buf *eb) +{ + kfree(eb->buf); +} /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); -- cgit v1.2.3 From ef86ddced720fddc3835558447a7f594d3609c73 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Thu, 6 Jun 2013 17:38:54 +0300 Subject: 
drm/i915: add error_state sysfs entry As getting error state doesn't anymore require big kmallocs, make error state accessible also from sysfs. v2: - error state clearing (Chris Wilson) - user hint, proper access mode bits and name (Daniel Vetter) v3: release resources in proper order (Chris Wilson) Suggested-by: Daniel Vetter Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson [danvet: Apply Chris' s/error_state/error/ bikeshed on the sysfs name. Also update the dmesg message, spotted by Chris.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 3 +- drivers/gpu/drm/i915/i915_sysfs.c | 71 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6ca6097aa48f..4c1b1e3dbf79 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1921,8 +1921,7 @@ static void i915_capture_error_state(struct drm_device *dev) } DRM_INFO("capturing error event; look for more information in " - "/sys/kernel/debug/dri/%d/i915_error_state\n", - dev->primary->index); + "/sys/class/drm/card%d/error\n", dev->primary->index); kref_init(&error->ref); error->eir = I915_READ(EIR); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 6875b5654c63..a777e7f3b0df 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = { NULL, }; +static ssize_t error_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct i915_error_state_file_priv error_priv; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + memset(&error_priv, 0, sizeof(error_priv)); + + ret = i915_error_state_buf_init(&error_str, count, off); + if (ret) + return ret; + + error_priv.dev = dev; + i915_error_state_get(dev, &error_priv); + + ret = i915_error_state_to_str(&error_str, &error_priv); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? 
count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_put(&error_priv); + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +static ssize_t error_state_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + int ret; + + DRM_DEBUG_DRIVER("Resetting error state\n"); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + i915_destroy_error_state(dev); + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static struct bin_attribute error_state_attr = { + .attr.name = "error", + .attr.mode = S_IRUSR | S_IWUSR, + .size = 0, + .read = error_state_read, + .write = error_state_write, +}; + void i915_setup_sysfs(struct drm_device *dev) { int ret; @@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev) if (ret) DRM_ERROR("gen6 sysfs setup failed\n"); } + + ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, + &error_state_attr); + if (ret) + DRM_ERROR("error_state sysfs setup failed\n"); } void i915_teardown_sysfs(struct drm_device *dev) { + sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); #ifdef CONFIG_PM -- cgit v1.2.3 From 6f02488e3a5cbd76974b9d56140b11f3aa012124 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 1 Jul 2013 10:19:09 -0700 Subject: drm/i915: fixup messages in pipe_config_compare Print out the flag that failed and fix up a mismatched paren. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2ab34e57ba4a..6b0013cc962f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8169,7 +8169,7 @@ intel_pipe_config_compare(struct drm_device *dev, #define PIPE_CONF_CHECK_FLAGS(name, mask) \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ - DRM_ERROR("mismatch in " #name " " \ + DRM_ERROR("mismatch in " #name "(" #mask ") " \ "(expected %i, found %i)\n", \ current_config->name & (mask), \ pipe_config->name & (mask)); \ @@ -8245,7 +8245,7 @@ intel_pipe_config_compare(struct drm_device *dev, if (!IS_HASWELL(dev)) { if (!intel_fuzzy_clock_check(current_config, pipe_config)) { - DRM_ERROR("mismatch in clock (expected %d, found %d\n", + DRM_ERROR("mismatch in clock (expected %d, found %d)\n", current_config->adjusted_mode.clock, pipe_config->adjusted_mode.clock); return false; -- cgit v1.2.3 From 510d5f2f6b97eccbfa08234e21b0577c1748807d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 1 Jul 2013 15:50:17 -0700 Subject: drm/i915: split encoder get_config calls from crtc get_clock calls This should help on HSW, where we don't currently have a get_clock call. 
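For reference, the readout flow after this change, condensed from the check_crtc_state() hunk below (locking and WARNs omitted); encoder config readout no longer depends on a get_clock hook being present, and the clock readout runs once per crtc:

/* Condensed sketch of the flow after this patch, not a literal excerpt:
 * per-encoder config readout is decoupled from the per-crtc clock readout,
 * so platforms without a get_clock hook (HSW at this point) still get
 * encoder->get_config() called. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
        if (encoder->base.crtc != &crtc->base)
                continue;
        if (encoder->get_config)
                encoder->get_config(encoder, &pipe_config);
}

if (dev_priv->display.get_clock)
        dev_priv->display.get_clock(crtc, &pipe_config);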
Reported-by: Paulo Zanoni Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6b0013cc962f..76796b179170 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8377,14 +8377,13 @@ check_crtc_state(struct drm_device *dev) base.head) { if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config && - dev_priv->display.get_clock) { + if (encoder->get_config) encoder->get_config(encoder, &pipe_config); - dev_priv->display.get_clock(crtc, - &pipe_config); - } } + if (dev_priv->display.get_clock) + dev_priv->display.get_clock(crtc, &pipe_config); + WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -9956,12 +9955,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; - if (encoder->get_config && - dev_priv->display.get_clock) { + if (encoder->get_config) encoder->get_config(encoder, &crtc->config); - dev_priv->display.get_clock(crtc, - &crtc->config); - } } else { encoder->base.crtc = NULL; } @@ -9974,6 +9969,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe); } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (!crtc->active) + continue; + if (dev_priv->display.get_clock) + dev_priv->display.get_clock(crtc, + &crtc->config); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { if (connector->get_hw_state(connector)) { -- cgit v1.2.3 From e8474409d7ab6dac38d4a3a6a365504b302f6c16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 26 Jun 2013 17:43:24 +0300 Subject: drm/i915: Use wait_for() to wait for Punit to change GPU freq on VLV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use wait_for() instead of the open coded loop to avoid spreading the same old timeout related bugs. This changes the loop to use msleep(1) instead of udelay(10) when the Punit had not yet completed the frequency change. In practice that doesn't seem to hurt performance as the Punit appears to be ready pretty much always. Also give the status bit a name, instead of using the magic number 1. 
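For reference, wait_for(COND, MS) behaves roughly like the open-coded loop sketched here; COND and MS are placeholders and the exact macro lives in intel_drv.h:

/* Rough equivalent of wait_for(COND, MS), illustration only: poll COND,
 * sleep ~1ms between polls, and give up with -ETIMEDOUT once MS
 * milliseconds have elapsed. This is why the patch trades udelay(10) for
 * msleep(1) between polls. */
unsigned long timeout = jiffies + msecs_to_jiffies(MS);
int ret = 0;

while (!(COND)) {
        if (time_after(jiffies, timeout)) {
                ret = -ETIMEDOUT;
                break;
        }
        msleep(1);
}
/* ret != 0 means the wait timed out, matching the check in the hunk below */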
Signed-off-by: Ville Syrjälä Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 11 ++--------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2326fc60ac9..9b51be800961 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -363,6 +363,7 @@ #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 #define PUNIT_REG_GPU_FREQ_STS 0xd8 +#define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 5b4ade682bd6..7cfd3b74d9b7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3075,19 +3075,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val) */ static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv) { - unsigned long timeout = jiffies + msecs_to_jiffies(10); u32 pval; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - do { - pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - if (time_after(jiffies, timeout)) { - DRM_DEBUG_DRIVER("timed out waiting for Punit\n"); - break; - } - udelay(10); - } while (pval & 1); + if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10)) + DRM_DEBUG_DRIVER("timed out waiting for Punit\n"); pval >>= 8; -- cgit v1.2.3 From e143a21c4d60d13dbdad133b7b2c9d9bb2dfb982 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 12:01:15 +0200 Subject: drm/i915: explicitly cast pipe -> cpu_transcoder This makes sparse happy and also makes it a bit more obvious where we pull off this trick - after all we're only allowed to do it eithe as a default or on platforms where there is no disdinction between the pipe and the cpu transcoder. Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 76796b179170..be15187a49aa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4966,7 +4966,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -5837,7 +5837,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -5953,7 +5953,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, enum intel_display_power_domain pfit_domain; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); @@ -7051,7 +7051,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need * to use a real value here instead. 
*/ - pipe_config.cpu_transcoder = intel_crtc->pipe; + pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; pipe_config.pixel_multiplier = 1; i9xx_crtc_clock_get(intel_crtc, &pipe_config); @@ -7881,7 +7881,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, drm_mode_copy(&pipe_config->adjusted_mode, mode); drm_mode_copy(&pipe_config->requested_mode, mode); - pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe; + pipe_config->cpu_transcoder = + (enum transcoder) to_intel_crtc(crtc)->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; /* Compute a starting value for pipe_config->pipe_bpp taking the source -- cgit v1.2.3 From d94ab068277bda17bfeb0e976049035153299a1a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 12:01:16 +0200 Subject: drm/i915: Explicitly cast pipe -> intel_dpll_id We only do this on IBX where there's a fixed pch dpll to pipe assignment. Being explicit about it can't really hurt and makes sparse happy. Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index be15187a49aa..aaa9a752b7b6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3097,7 +3097,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) if (HAS_PCH_IBX(dev_priv->dev)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ - i = crtc->pipe; + i = (enum intel_dpll_id) crtc->pipe; pll = &dev_priv->shared_dplls[i]; DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", @@ -5856,7 +5856,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv->dev)) { - pipe_config->shared_dpll = crtc->pipe; + pipe_config->shared_dpll = + (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) -- cgit v1.2.3 From 190d6cd5cd3606dd13a3ca5bf0c23dc520659c15 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 13:06:28 +0200 Subject: drm/i915: less magic for stolen preallocated objects w/o gtt offset A magic -1 is a obscure, especially since it's actually passed as an unsigned, so depends upon the magic sign extension rules in C. This has been added in commit 3727d55e4d85836aa6cb759a965daaef88074150 Author: Jesse Barnes Date: Wed May 8 10:45:14 2013 -0700 drm/i915: allow stolen, pre-allocated objects to avoid GTT allocation v2 Use a proper #define instead. Spotted while reviewing Ben's drm_mm_create_block changes. v2: Cast the constant to u32 since otherwise we again have a type mismatch. Suggested by Chris Wilson. Cc: Ben Widawsky Cc: Jesse Barnes Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a7c2cfb3ee97..0a91554e0aa1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1202,6 +1202,7 @@ enum hdmi_force_audio { }; #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) +#define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { /* Interface between the GEM object and its backing storage. 
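A small illustration of the sign-extension point, using a hypothetical helper that is not part of the patch; the real callers are in the i915_gem_stolen.c and intel_pm.c hunks that follow:

/* Hypothetical helper for illustration only: gtt_offset is a u32 in the
 * callers below, so comparing against a bare -1 only works because -1 is
 * implicitly converted to 0xffffffff. The ((u32)-1) define expresses the
 * same sentinel without the sign-extension guesswork and without a
 * signed/unsigned mismatch. */
static bool gtt_offset_is_none(u32 gtt_offset)
{
        return gtt_offset == I915_GTT_OFFSET_NONE;
}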
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 8e023447b7eb..0f18d75fa9f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -360,7 +360,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, } /* Some objects just need physical mem from stolen space */ - if (gtt_offset == -1) + if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; /* To simplify the initialisation sequence between KMS and GTT, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7cfd3b74d9b7..0eed35da3ea5 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3474,7 +3474,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, pcbr_offset, - -1, + I915_GTT_OFFSET_NONE, pctx_size); goto out; } -- cgit v1.2.3 From b79480ba5074ae81d1c32073bce3981652e0f717 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 27 Jun 2013 17:52:10 +0200 Subject: drm/i915: assert_spin_locked for pipestat interrupt enable/disable Just to keep the paranoia equal also sprinkle locking asserts over the pipestat interrupt enable/disable functions. Again this results in false positives in the interrupt setup. Add bogo-locking for these and a big comment explaining why it's there and that it's indeed unnecessary. v2: Fix up the spelling fail Paulo spotted in comments. Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4c1b1e3dbf79..c2e11a0fa40c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -319,6 +319,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; + assert_spin_locked(&dev_priv->irq_lock); + if ((pipestat & mask) == mask) return; @@ -334,6 +336,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; + assert_spin_locked(&dev_priv->irq_lock); + if ((pipestat & mask) == 0) return; @@ -2818,6 +2822,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) u32 gt_irqs; u32 enable_mask; u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; + unsigned long irqflags; enable_mask = I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | @@ -2843,9 +2848,13 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(PIPESTAT(1), 0xffff); POSTING_READ(VLV_IER); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, pipestat_enable); i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); i915_enable_pipestat(dev_priv, 1, pipestat_enable); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); @@ -3324,6 +3333,7 @@ static int i965_irq_postinstall(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; u32 error_mask; + unsigned long irqflags; /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | @@ -3342,7 +3352,11 @@ static int i965_irq_postinstall(struct drm_device *dev) if (IS_G4X(dev)) enable_mask |= I915_BSD_USER_INTERRUPT; + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* * Enable some error detection, note the instruction error mask -- cgit v1.2.3 From b3a070cccb9135f8bec63d9f194ddaa422136fb0 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 5 Jul 2013 14:41:02 -0700 Subject: drm: pre allocate node for create_block For an upcoming patch where we introduce the i915 VMA, it's ideal to have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated). Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this will break a bunch of code, but amongst them are 2 callers of drm_mm_create_block(), both related to stolen memory. It also allows us to embed the drm_mm_node into the object currently which provides a nice transition over to the new code. v2: Reordered to do before ripping out obj->gtt_offset. Some minor cleanups made available because of reordering. v3: s/continue/break on failed stolen node allocation (David) Set obj->gtt_space on failed node allocation (David) Only unref stolen (fix double free) on failed create_stolen (David) Free node, and NULL it in failed create_stolen (David) Add back accidentally removed newline (David) CC: Reviewed-by: David Herrmann Signed-off-by: Ben Widawsky Acked-by: David Airlie Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_mm.c | 16 +++++---------- drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++++++---- drivers/gpu/drm/i915/i915_gem_stolen.c | 37 +++++++++++++++++++++++++--------- include/drm/drm_mm.h | 9 +++++---- 4 files changed, 53 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 07cf99cc8862..9e8dfbc1955e 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, } } -struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, - unsigned long start, - unsigned long size, - bool atomic) +int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long start, unsigned long size) { - struct drm_mm_node *hole, *node; + struct drm_mm_node *hole; unsigned long end = start + size; unsigned long hole_start; unsigned long hole_end; @@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, if (hole_start > start || hole_end < end) continue; - node = drm_mm_kmalloc(mm, atomic); - if (unlikely(node == NULL)) - return NULL; - node->start = start; node->size = size; node->mm = mm; @@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, node->hole_follows = 1; } - return node; + return 0; } WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); - return NULL; + return -ENOSPC; } EXPORT_SYMBOL(drm_mm_create_block); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 66929eac6367..88180a597c0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -629,14 +629,26 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, 
global_list) { + int ret; DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", obj->gtt_offset, obj->base.size); BUG_ON(obj->gtt_space != I915_GTT_RESERVED); - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, - obj->gtt_offset, - obj->base.size, - false); + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL); + if (!obj->gtt_space) { + DRM_ERROR("Failed to preserve object at offset %x\n", + obj->gtt_offset); + continue; + } + ret = drm_mm_create_block(&dev_priv->mm.gtt_space, + obj->gtt_space, + obj->gtt_offset, + obj->base.size); + if (ret) { + DRM_DEBUG_KMS("Reservation failed\n"); + kfree(obj->gtt_space); + obj->gtt_space = NULL; + } obj->has_global_gtt_mapping = 1; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 0f18d75fa9f8..2746ff2d846a 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; + int ret; if (dev_priv->mm.stolen_base == 0) return NULL; @@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (WARN_ON(size == 0)) return NULL; - stolen = drm_mm_create_block(&dev_priv->mm.stolen, - stolen_offset, size, - false); - if (stolen == NULL) { + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) + return NULL; + + ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset, + size); + if (ret) { DRM_DEBUG_KMS("failed to allocate stolen space\n"); + kfree(stolen); return NULL; } @@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, * later. 
*/ if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, - gtt_offset, size, - false); - if (obj->gtt_space == NULL) { + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL); + if (!obj->gtt_space) { + DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n"); + goto unref_out; + } + + ret = drm_mm_create_block(&dev_priv->mm.gtt_space, + obj->gtt_space, + gtt_offset, size); + if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - drm_gem_object_unreference(&obj->base); - return NULL; + goto free_out; } } else obj->gtt_space = I915_GTT_RESERVED; @@ -387,6 +397,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); return obj; + +free_out: + kfree(obj->gtt_space); + obj->gtt_space = NULL; +unref_out: + drm_gem_object_unreference(&obj->base); + return NULL; } void diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 88591ef8fa24..d8b56b7d1839 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) /* * Basic range manager support (drm_mm.c) */ -extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, - unsigned long start, - unsigned long size, - bool atomic); +extern int drm_mm_create_block(struct drm_mm *mm, + struct drm_mm_node *node, + unsigned long start, + unsigned long size); extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, unsigned long size, unsigned alignment, @@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic( unsigned long start, unsigned long end, int atomic); + static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, unsigned long size, unsigned alignment) -- cgit v1.2.3 From 338710e7aff3428dc8170a03704a8ae981b58dcd Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 5 Jul 2013 14:41:03 -0700 Subject: drm: Change create block to reserve node With the previous patch we no longer actually create a node, we simply find the correct hole and occupy it. This very well could have been squashed with the last patch, but since I already had David's review, I figured it's easiest to keep it distinct. Also update the users in i915. Conveniently this is the only user of the interface. 
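Condensed caller-side sketch of the reworked interface, based on the i915 hunks below; variable names (gtt_offset, size, dev_priv) follow those hunks and error handling is shortened:

/* The caller now owns the node: allocate it, fill in start/size, and let
 * drm_mm_reserve_node() find the hole covering [start, start + size) and
 * occupy it (it returns -ENOSPC if no such hole exists). */
struct drm_mm_node *node;
int ret;

node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
        return -ENOMEM;

node->start = gtt_offset;
node->size = size;
ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, node);
if (ret) {
        DRM_DEBUG_KMS("failed to reserve preallocated range\n");
        kfree(node);
        return ret;
}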
CC: David Airlie CC: Signed-off-by: Ben Widawsky Acked-by: David Airlie Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_mm.c | 19 ++++++++++--------- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++---- drivers/gpu/drm/i915/i915_gem_stolen.c | 12 +++++++----- include/drm/drm_mm.h | 5 +---- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 9e8dfbc1955e..52e0ee7f4a6f 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -147,27 +147,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, } } -int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long start, unsigned long size) +int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; - unsigned long end = start + size; + unsigned long end = node->start + node->size; unsigned long hole_start; unsigned long hole_end; + BUG_ON(node == NULL); + + /* Find the relevant hole to add our node to */ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { - if (hole_start > start || hole_end < end) + if (hole_start > node->start || hole_end < end) continue; - node->start = start; - node->size = size; node->mm = mm; node->allocated = 1; INIT_LIST_HEAD(&node->hole_stack); list_add(&node->node_list, &hole->node_list); - if (start == hole_start) { + if (node->start == hole_start) { hole->hole_follows = 0; list_del_init(&hole->hole_stack); } @@ -181,10 +181,11 @@ int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node, return 0; } - WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); + WARN(1, "no hole found for node 0x%lx + 0x%lx\n", + node->start, node->size); return -ENOSPC; } -EXPORT_SYMBOL(drm_mm_create_block); +EXPORT_SYMBOL(drm_mm_reserve_node); struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, unsigned long size, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 88180a597c0a..afba7e5e7739 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -640,10 +640,10 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, obj->gtt_offset); continue; } - ret = drm_mm_create_block(&dev_priv->mm.gtt_space, - obj->gtt_space, - obj->gtt_offset, - obj->base.size); + obj->gtt_space->start = obj->gtt_offset; + obj->gtt_space->size = obj->base.size; + ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, + obj->gtt_space); if (ret) { DRM_DEBUG_KMS("Reservation failed\n"); kfree(obj->gtt_space); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 2746ff2d846a..4e6dbbb47dfe 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -349,8 +349,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (!stolen) return NULL; - ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset, - size); + stolen->start = stolen_offset; + stolen->size = size; + ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen space\n"); kfree(stolen); @@ -380,9 +381,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, goto unref_out; } - ret = drm_mm_create_block(&dev_priv->mm.gtt_space, - obj->gtt_space, - gtt_offset, size); + obj->gtt_space->start = gtt_offset; + obj->gtt_space->size = size; + ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, + obj->gtt_space); if (ret) { DRM_DEBUG_KMS("failed to 
allocate stolen GTT space\n"); goto free_out; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index d8b56b7d1839..2de91e3da5cc 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -138,10 +138,7 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) /* * Basic range manager support (drm_mm.c) */ -extern int drm_mm_create_block(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long start, - unsigned long size); +extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, unsigned long size, unsigned alignment, -- cgit v1.2.3 From f343c5f6477354967ee1e331a68a56b9fece2f36 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 5 Jul 2013 14:41:04 -0700 Subject: drm/i915: Getter/setter for object attributes Soon we want to gut a lot of our existing assumptions how many address spaces an object can live in, and in doing so, embed the drm_mm_node in the object (and later the VMA). It's possible in the future we'll want to add more getter/setter methods, but for now this is enough to enable the VMAs. v2: Reworked commit message (Ben) Added comments to the main functions (Ben) sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch] (Daniel) v3: Rebased on new reserve_node patch Changed DRM_DEBUG_KMS to actually work (will need fixing later) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++---- drivers/gpu/drm/i915/i915_drv.h | 31 +++++++++++ drivers/gpu/drm/i915/i915_gem.c | 89 +++++++++++++++--------------- drivers/gpu/drm/i915/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 ++++--- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +-- drivers/gpu/drm/i915/i915_gem_tiling.c | 14 ++--- drivers/gpu/drm/i915/i915_irq.c | 15 +++-- drivers/gpu/drm/i915/i915_trace.h | 8 +-- drivers/gpu/drm/i915/intel_display.c | 28 +++++----- drivers/gpu/drm/i915/intel_fb.c | 8 +-- drivers/gpu/drm/i915/intel_overlay.c | 14 ++--- drivers/gpu/drm/i915/intel_pm.c | 8 +-- drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++-- drivers/gpu/drm/i915/intel_sprite.c | 8 ++- 15 files changed, 164 insertions(+), 126 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3e36756d0439..396387ed207a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -122,9 +122,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", obj->pin_count); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); - if (obj->gtt_space != NULL) - seq_printf(m, " (gtt offset: %08x, size: %08x)", - obj->gtt_offset, (unsigned int)obj->gtt_space->size); + if (i915_gem_obj_ggtt_bound(obj)) + seq_printf(m, " (gtt offset: %08lx, size: %08x)", + i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj)); if (obj->stolen) seq_printf(m, " (stolen: %08lx)", obj->stolen->start); if (obj->pin_mappable || obj->fault_mappable) { @@ -175,7 +175,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) describe_obj(m, obj); seq_putc(m, '\n'); total_obj_size += obj->base.size; - 
total_gtt_size += obj->gtt_space->size; + total_gtt_size += i915_gem_obj_ggtt_size(obj); count++; } mutex_unlock(&dev->struct_mutex); @@ -187,10 +187,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) #define count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ - size += obj->gtt_space->size; \ + size += i915_gem_obj_ggtt_size(obj); \ ++count; \ if (obj->map_and_fenceable) { \ - mappable_size += obj->gtt_space->size; \ + mappable_size += i915_gem_obj_ggtt_size(obj); \ ++mappable_count; \ } \ } \ @@ -209,7 +209,7 @@ static int per_file_stats(int id, void *ptr, void *data) stats->count++; stats->total += obj->base.size; - if (obj->gtt_space) { + if (i915_gem_obj_ggtt_bound(obj)) { if (!list_empty(&obj->ring_list)) stats->active += obj->base.size; else @@ -267,11 +267,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data) size = count = mappable_size = mappable_count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if (obj->fault_mappable) { - size += obj->gtt_space->size; + size += i915_gem_obj_ggtt_size(obj); ++count; } if (obj->pin_mappable) { - mappable_size += obj->gtt_space->size; + mappable_size += i915_gem_obj_ggtt_size(obj); ++mappable_count; } if (obj->madv == I915_MADV_DONTNEED) { @@ -333,7 +333,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) describe_obj(m, obj); seq_putc(m, '\n'); total_obj_size += obj->base.size; - total_gtt_size += obj->gtt_space->size; + total_gtt_size += i915_gem_obj_ggtt_size(obj); count++; } @@ -379,12 +379,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) if (work->old_fb_obj) { struct drm_i915_gem_object *obj = work->old_fb_obj; if (obj) - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_ggtt_offset(obj)); } if (work->pending_flip_obj) { struct drm_i915_gem_object *obj = work->pending_flip_obj; if (obj) - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_ggtt_offset(obj)); } } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0a91554e0aa1..8a92174109c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1361,6 +1361,37 @@ struct drm_i915_gem_object { #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +/* Offset of the first PTE pointing to this object */ +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) +{ + return o->gtt_space->start; +} + +/* Whether or not this object is currently mapped by the translation tables */ +static inline bool +i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) +{ + return o->gtt_space != NULL; +} + +/* The size used in the translation tables may be larger than the actual size of + * the object on GEN2/GEN3 because of the way tiling is handled. See + * i915_gem_get_gtt_size() for more details. + */ +static inline unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) +{ + return o->gtt_space->size; +} + +static inline void +i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, + enum i915_cache_level color) +{ + o->gtt_space->color = color; +} + /** * Request queue structure. 
* diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 769f75262feb..b8a0d91f2e2d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj->gtt_space && !obj->active; + return i915_gem_obj_ggtt_bound(obj) && !obj->active; } int @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) - pinned += obj->gtt_space->size; + pinned += i915_gem_obj_ggtt_size(obj); mutex_unlock(&dev->struct_mutex); args->aper_size = dev_priv->gtt.total; @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev, * anyway again before the next pread happens. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush = 1; - if (obj->gtt_space) { + if (i915_gem_obj_ggtt_bound(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) return ret; @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, user_data = to_user_ptr(args->data_ptr); remain = args->size; - offset = obj->gtt_offset + args->offset; + offset = i915_gem_obj_ggtt_offset(obj) + args->offset; while (remain > 0) { /* Operation in this page @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * right away and we therefore have to clflush anyway. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush_after = 1; - if (obj->gtt_space) { + if (i915_gem_obj_ggtt_bound(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; @@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) obj->fault_mappable = true; - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + - page_offset; + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); + pfn >>= PAGE_SHIFT; + pfn += page_offset; /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); @@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages == NULL) return 0; - BUG_ON(obj->gtt_space); + BUG_ON(i915_gem_obj_ggtt_bound(obj)); if (obj->pages_pin_count) return -EBUSY; @@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) { - if (acthd >= obj->gtt_offset && - acthd < obj->gtt_offset + obj->base.size) + if (acthd >= i915_gem_obj_ggtt_offset(obj) && + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) return true; return false; @@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, if (ring->hangcheck.action != wait && i915_request_guilty(request, acthd, &inside)) { - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", ring->name, inside ? "inside" : "flushing", request->batch_obj ? - request->batch_obj->gtt_offset : 0, + i915_gem_obj_ggtt_offset(request->batch_obj) : 0, request->ctx ? 
request->ctx->id : 0, acthd); @@ -2592,7 +2593,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) drm_i915_private_t *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->gtt_space == NULL) + if (!i915_gem_obj_ggtt_bound(obj)) return 0; if (obj->pin_count) @@ -2675,11 +2676,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, } if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); - val = (uint64_t)((obj->gtt_offset + size - 4096) & + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; @@ -2699,15 +2700,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, u32 val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); int pitch_val; int tile_width; - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - obj->gtt_offset, obj->map_and_fenceable, size); + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; @@ -2718,7 +2719,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_offset; + val = i915_gem_obj_ggtt_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I915_FENCE_SIZE_BITS(size); @@ -2743,19 +2744,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, uint32_t val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); uint32_t pitch_val; - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x not 512K or pot-size 0x%08x aligned\n", - obj->gtt_offset, size); + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), + "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", + i915_gem_obj_ggtt_offset(obj), size); pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_offset; + val = i915_gem_obj_ggtt_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I830_FENCE_SIZE_BITS(size); @@ -3044,8 +3045,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) if (obj->cache_level != obj->gtt_space->color) { printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, + i915_gem_obj_ggtt_offset(obj), + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), obj->cache_level, obj->gtt_space->color); err++; @@ -3056,8 +3057,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) obj->gtt_space, obj->cache_level)) { printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, + 
i915_gem_obj_ggtt_offset(obj), + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), obj->cache_level); err++; continue; @@ -3169,8 +3170,8 @@ search_free: node->size == fence_size && (node->start & (fence_alignment - 1)) == 0; - mappable = - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; + mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <= + dev_priv->gtt.mappable_end; obj->map_and_fenceable = mappable && fenceable; @@ -3272,7 +3273,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int ret; /* Not valid to be called on unbound objects. */ - if (obj->gtt_space == NULL) + if (!i915_gem_obj_ggtt_bound(obj)) return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) @@ -3337,7 +3338,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return ret; } - if (obj->gtt_space) { + if (i915_gem_obj_ggtt_bound(obj)) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -3360,7 +3361,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - obj->gtt_space->color = cache_level; + i915_gem_obj_ggtt_set_color(obj, cache_level); } if (cache_level == I915_CACHE_NONE) { @@ -3641,14 +3642,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if (obj->gtt_space != NULL) { - if ((alignment && obj->gtt_offset & (alignment - 1)) || + if (i915_gem_obj_ggtt_bound(obj)) { + if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) || (map_and_fenceable && !obj->map_and_fenceable)) { WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - obj->gtt_offset, alignment, + i915_gem_obj_ggtt_offset(obj), alignment, map_and_fenceable, obj->map_and_fenceable); ret = i915_gem_object_unbind(obj); @@ -3657,7 +3658,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, } } - if (obj->gtt_space == NULL) { + if (!i915_gem_obj_ggtt_bound(obj)) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; ret = i915_gem_object_bind_to_gtt(obj, alignment, @@ -3683,7 +3684,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { BUG_ON(obj->pin_count == 0); - BUG_ON(obj->gtt_space == NULL); + BUG_ON(!i915_gem_obj_ggtt_bound(obj)); if (--obj->pin_count == 0) obj->pin_mappable = false; @@ -3733,7 +3734,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj->gtt_offset; + args->offset = i915_gem_obj_ggtt_offset(obj); out: drm_gem_object_unreference(&obj->base); unlock: diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 51b7a2171cae..2074544682cf 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, new_context->obj->gtt_offset | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 87a3227e5179..5aeb447ead6b 
100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -ENOENT; target_i915_obj = to_intel_bo(target_obj); - target_offset = target_i915_obj->gtt_offset; + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and * pipe_control writes because the gpu doesn't properly redirect them @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return ret; /* Map the page containing the relocation we're going to perform. */ - reloc->offset += obj->gtt_offset; + reloc->offset += i915_gem_obj_ggtt_offset(obj); reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, reloc->offset & PAGE_MASK); reloc_entry = (uint32_t __iomem *) @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->has_aliasing_ppgtt_mapping = 1; } - if (entry->offset != obj->gtt_offset) { - entry->offset = obj->gtt_offset; + if (entry->offset != i915_gem_obj_ggtt_offset(obj)) { + entry->offset = i915_gem_obj_ggtt_offset(obj); *need_reloc = true; } @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) { struct drm_i915_gem_exec_object2 *entry; - if (!obj->gtt_space) + if (!i915_gem_obj_ggtt_bound(obj)) return; entry = obj->exec_entry; @@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool need_fence, need_mappable; - if (!obj->gtt_space) + if (!i915_gem_obj_ggtt_bound(obj)) continue; need_fence = @@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + if ((entry->alignment && + i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else @@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, /* Bind fresh objects */ list_for_each_entry(obj, objects, exec_list) { - if (obj->gtt_space) + if (i915_gem_obj_ggtt_bound(obj)) continue; ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); @@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - exec_start = batch_obj->gtt_offset + args->batch_start_offset; + exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index afba7e5e7739..6f0a4c09e26a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, enum i915_cache_level cache_level) { ppgtt->insert_entries(ppgtt, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, cache_level); } @@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj) { ppgtt->clear_range(ppgtt, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); } @@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, struct drm_i915_private 
*dev_priv = dev->dev_private; dev_priv->gtt.gtt_insert_entries(dev, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, cache_level); obj->has_global_gtt_mapping = 1; @@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->gtt.gtt_clear_range(obj->base.dev, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 537545be69db..92a8d279ca39 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) return true; if (INTEL_INFO(obj->base.dev)->gen == 3) { - if (obj->gtt_offset & ~I915_FENCE_START_MASK) + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { - if (obj->gtt_offset & ~I830_FENCE_START_MASK) + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) return false; } size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); - if (obj->gtt_space->size != size) + if (i915_gem_obj_ggtt_size(obj) != size) return false; - if (obj->gtt_offset & (size - 1)) + if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) return false; return true; @@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ obj->map_and_fenceable = - obj->gtt_space == NULL || - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && + !i915_gem_obj_ggtt_bound(obj) || + (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); /* Rebind if we need a change of alignment */ @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_get_gtt_alignment(dev, obj->base.size, args->tiling_mode, false); - if (obj->gtt_offset & (unfenced_alignment - 1)) + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1)) ret = i915_gem_object_unbind(obj); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c2e11a0fa40c..4aedd387c8b5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1520,7 +1520,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, if (dst == NULL) return NULL; - reloc_offset = src->gtt_offset; + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); for (i = 0; i < num_pages; i++) { unsigned long flags; void *d; @@ -1572,7 +1572,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, reloc_offset += PAGE_SIZE; } dst->page_count = num_pages; - dst->gtt_offset = src->gtt_offset; return dst; @@ -1626,7 +1625,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->name = obj->base.name; err->rseqno = obj->last_read_seqno; err->wseqno = obj->last_write_seqno; - err->gtt_offset = obj->gtt_offset; + err->gtt_offset = i915_gem_obj_ggtt_offset(obj); err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; @@ -1724,8 +1723,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, return NULL; obj = ring->private; - if (acthd >= obj->gtt_offset && - acthd < obj->gtt_offset + obj->base.size) + if (acthd >= i915_gem_obj_ggtt_offset(obj) && + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) return 
i915_error_object_create(dev_priv, obj); } @@ -1806,7 +1805,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, return; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { ering->ctx = i915_error_object_create_sized(dev_priv, obj, 1); break; @@ -2160,10 +2159,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == - obj->gtt_offset; + i915_gem_obj_ggtt_offset(obj); } else { int dspaddr = DSPADDR(intel_crtc->plane); - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + crtc->y * crtc->fb->pitches[0] + crtc->x * crtc->fb->bits_per_pixel/8); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 3db4a6817713..7d283b5fcbf9 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind, TP_fast_assign( __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->offset = i915_gem_obj_ggtt_offset(obj); + __entry->size = i915_gem_obj_ggtt_size(obj); __entry->mappable = mappable; ), @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind, TP_fast_assign( __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->offset = i915_gem_obj_ggtt_offset(obj); + __entry->size = i915_gem_obj_ggtt_size(obj); ), TP_printk("obj=%p, offset=%08x size=%x", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aaa9a752b7b6..a45bb92f35ad 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_crtc->dspaddr_offset = linear_offset; } - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, + fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_offset + intel_crtc->dspaddr_offset); + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); POSTING_READ(reg); return 0; @@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc, fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, + fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_offset + intel_crtc->dspaddr_offset); + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { @@ -6567,7 +6569,7 @@ static 
int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_unpin; } - addr = obj->gtt_offset; + addr = i915_gem_obj_ggtt_offset(obj); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, obj, @@ -7339,7 +7341,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_mark_page_flip_active(intel_crtc); @@ -7380,7 +7382,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, MI_NOOP); intel_mark_page_flip_active(intel_crtc); @@ -7420,7 +7422,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, - (obj->gtt_offset + intel_crtc->dspaddr_offset) | + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -7463,7 +7465,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -7528,7 +7530,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, (MI_NOOP)); intel_mark_page_flip_active(intel_crtc); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index dff669e2387f..f3c97e05b0d8 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); info->fix.smem_len = size; info->screen_base = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), size); if (!info->screen_base) { ret = -ENOSPC; @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper, /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, fb->height, - obj->gtt_offset, obj); + i915_gem_obj_ggtt_offset(obj), obj); 
mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a3698812e9c7..81c3ca14fa92 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; else regs = io_mapping_map_wc(dev_priv->gtt.mappable, - overlay->reg_bo->gtt_offset); + i915_gem_obj_ggtt_offset(overlay->reg_bo)); return regs; } @@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, swidth = params->src_w; swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); sheight = params->src_h; - iowrite32(new_bo->gtt_offset + params->offset_Y, ®s->OBUF_0Y); + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); ostride = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; - iowrite32(new_bo->gtt_offset + params->offset_U, ®s->OBUF_0U); - iowrite32(new_bo->gtt_offset + params->offset_V, ®s->OBUF_0V); + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, ®s->OBUF_0U); + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, ®s->OBUF_0V); ostride |= params->stride_UV << 16; } @@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } - overlay->flip_addr = reg_bo->gtt_offset; + overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) overlay->reg_bo->phys_obj->handle->vaddr; else regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - overlay->reg_bo->gtt_offset); + i915_gem_obj_ggtt_offset(overlay->reg_bo)); return regs; } @@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; else - error->base = overlay->reg_bo->gtt_offset; + error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); regs = intel_overlay_map_regs_atomic(overlay); if (!regs) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0eed35da3ea5..125a741eed86 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); /* enable it... 
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset); + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | IVB_DPFC_CTL_FENCE_EN | @@ -3700,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -3723,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) return; } - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e51ab552046c..54495df2403e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. */ - I915_WRITE_START(ring, obj->gtt_offset); + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && - I915_READ_START(ring) == obj->gtt_offset && + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring) if (ret) goto err_unref; - pc->gtt_offset = obj->gtt_offset; + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); pc->cpu_page = kmap(sg_page(obj->pages->sgl)); if (pc->cpu_page == NULL) { ret = -ENOMEM; @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_advance(ring); } else { struct drm_i915_gem_object *obj = ring->private; - u32 cs_offset = obj->gtt_offset; + u32 cs_offset = i915_gem_obj_ggtt_offset(obj); if (len > I830_BATCH_LIMIT) return -ENOSPC; @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring) goto err_unref; } - ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto err_unpin; ring->virtual_start = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ring->size); if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 1fa5612a4572..55bdf70b548b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ 
b/drivers/gpu/drm/i915/intel_sprite.c @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (intel_plane->can_scale) I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); /* potentially re-enable LP watermarks */ @@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } -- cgit v1.2.3 From edd41a870f11157a1bf4c15080421f9770912e09 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 5 Jul 2013 14:41:05 -0700 Subject: drm/i915: Kill obj->gtt_offset With the getters in place from the previous patch this members serves no purpose other than saving one spare pointer chase, which will be killed in the next patch anyway. Moving to VMAs, this members adds unnecessary confusion since an object may exist at different offsets in different VMs. v2: Properly preserve the stolen offset. This code is a bit hacky but it all goes away when we embed the drm_mm_node and removes the need for the incorrect patch I submitted previously: "Use gtt_space->start for stolen reservation" Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 9 +-------- drivers/gpu/drm/i915/i915_gem.c | 2 -- drivers/gpu/drm/i915/i915_gem_gtt.c | 14 ++++++++------ drivers/gpu/drm/i915/i915_gem_stolen.c | 9 ++++++--- 4 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8a92174109c2..7b8fbba4c6e7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1201,7 +1201,7 @@ enum hdmi_force_audio { HDMI_AUDIO_ON, /* force turn on HDMI audio */ }; -#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) +#define I915_GTT_RESERVED (1<<0) #define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { @@ -1329,13 +1329,6 @@ struct drm_i915_gem_object { unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; - /** - * Current offset of the object in GTT space. - * - * This is the same as gtt_space->start - */ - uint32_t gtt_offset; - struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer. 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b8a0d91f2e2d..3ea54c8eec24 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2634,7 +2634,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; - obj->gtt_offset = 0; return 0; } @@ -3164,7 +3163,6 @@ search_free: list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->gtt_space = node; - obj->gtt_offset = node->start; fenceable = node->size == fence_size && diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6f0a4c09e26a..76a4095452c7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -629,18 +629,20 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + uintptr_t offset = (uintptr_t) obj->gtt_space; int ret; - DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", - obj->gtt_offset, obj->base.size); + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", + offset, obj->base.size); - BUG_ON(obj->gtt_space != I915_GTT_RESERVED); + BUG_ON((offset & I915_GTT_RESERVED) != 0); + offset &= ~I915_GTT_RESERVED; obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL); if (!obj->gtt_space) { - DRM_ERROR("Failed to preserve object at offset %x\n", - obj->gtt_offset); + DRM_ERROR("Failed to preserve object at offset %lx\n", + offset); continue; } - obj->gtt_space->start = obj->gtt_offset; + obj->gtt_space->start = (unsigned long)offset; obj->gtt_space->size = obj->base.size; ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, obj->gtt_space); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 4e6dbbb47dfe..559f75450d35 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -389,10 +389,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); goto free_out; } - } else - obj->gtt_space = I915_GTT_RESERVED; + } else { + if (WARN_ON(gtt_offset & ~PAGE_MASK)) + DRM_DEBUG_KMS("Cannot preserve non page aligned offset\n"); + obj->gtt_space = + (struct drm_mm_node *)((uintptr_t)(I915_GTT_RESERVED | gtt_offset)); + } - obj->gtt_offset = gtt_offset; obj->has_global_gtt_mapping = 1; list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); -- cgit v1.2.3 From c6cfb325677ea6305fb19acf3a4d14ea267f923e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 5 Jul 2013 14:41:06 -0700 Subject: drm/i915: Embed drm_mm_node in i915 gem obj Embedding the node in the obj is more natural in the transition to VMAs which will also have embedded nodes. This change also helps transition away from put_block to remove node. Though it's quite an uncommon occurrence, it's somewhat convenient to not fail at bind time because we cannot allocate the node. Though in practice there are other allocations (like the request structure) which would probably make this point not terribly useful. Quoting Daniel: Note that the only difference between put_block and remove_node is that the former fills up the preallocation cache. Which we don't need anyway and hence is just wasted space. v2: Clean up the stolen preallocation code. 
Rebased on the reserve_node patches renames ggtt_ stuff to gtt_ stuff WARN_ON if the object is already bound (which doesn't mean it's in the bound list, tricky) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 11 +++++------ drivers/gpu/drm/i915/i915_gem.c | 31 +++++++++++-------------------- drivers/gpu/drm/i915/i915_gem_evict.c | 6 +++--- drivers/gpu/drm/i915/i915_gem_gtt.c | 23 +++++------------------ drivers/gpu/drm/i915/i915_gem_stolen.c | 22 ++++------------------ 5 files changed, 28 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7b8fbba4c6e7..993fd2c8a457 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1201,7 +1201,6 @@ enum hdmi_force_audio { HDMI_AUDIO_ON, /* force turn on HDMI audio */ }; -#define I915_GTT_RESERVED (1<<0) #define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { @@ -1228,7 +1227,7 @@ struct drm_i915_gem_object { const struct drm_i915_gem_object_ops *ops; /** Current space allocated to this object in the GTT, if any. */ - struct drm_mm_node *gtt_space; + struct drm_mm_node gtt_space; /** Stolen memory for this object, instead of being backed by shmem. */ struct drm_mm_node *stolen; struct list_head global_list; @@ -1358,14 +1357,14 @@ struct drm_i915_gem_object { static inline unsigned long i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) { - return o->gtt_space->start; + return o->gtt_space.start; } /* Whether or not this object is currently mapped by the translation tables */ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) { - return o->gtt_space != NULL; + return drm_mm_node_allocated(&o->gtt_space); } /* The size used in the translation tables may be larger than the actual size of @@ -1375,14 +1374,14 @@ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) static inline unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) { - return o->gtt_space->size; + return o->gtt_space.size; } static inline void i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, enum i915_cache_level color) { - o->gtt_space->color = color; + o->gtt_space.color = color; } /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3ea54c8eec24..339404937ab9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2632,8 +2632,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) /* Avoid an unnecessary call to unbind on rebind. */ obj->map_and_fenceable = true; - drm_mm_put_block(obj->gtt_space); - obj->gtt_space = NULL; + drm_mm_remove_node(&obj->gtt_space); return 0; } @@ -3011,7 +3010,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev, if (HAS_LLC(dev)) return true; - if (gtt_space == NULL) + if (!drm_mm_node_allocated(gtt_space)) return true; if (list_empty(>t_space->node_list)) @@ -3079,7 +3078,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_mm_node *node; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; size_t gtt_max = map_and_fenceable ? 
@@ -3124,14 +3122,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (node == NULL) { - i915_gem_object_unpin_pages(obj); - return -ENOMEM; - } - search_free: - ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, + ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, + &obj->gtt_space, size, alignment, obj->cache_level, 0, gtt_max); if (ret) { @@ -3143,30 +3136,28 @@ search_free: goto search_free; i915_gem_object_unpin_pages(obj); - kfree(node); return ret; } - if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { + if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space, + obj->cache_level))) { i915_gem_object_unpin_pages(obj); - drm_mm_put_block(node); + drm_mm_remove_node(&obj->gtt_space); return -EINVAL; } ret = i915_gem_gtt_prepare_object(obj); if (ret) { i915_gem_object_unpin_pages(obj); - drm_mm_put_block(node); + drm_mm_remove_node(&obj->gtt_space); return ret; } list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - obj->gtt_space = node; - fenceable = - node->size == fence_size && - (node->start & (fence_alignment - 1)) == 0; + i915_gem_obj_ggtt_size(obj) == fence_size && + (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0; mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end; @@ -3330,7 +3321,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { + if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) { ret = i915_gem_object_unbind(obj); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index c86d5d9356fd..5f8afc48bb7e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) return false; list_add(&obj->exec_list, unwind); - return drm_mm_scan_add_block(obj->gtt_space); + return drm_mm_scan_add_block(&obj->gtt_space); } int @@ -107,7 +107,7 @@ none: struct drm_i915_gem_object, exec_list); - ret = drm_mm_scan_remove_block(obj->gtt_space); + ret = drm_mm_scan_remove_block(&obj->gtt_space); BUG_ON(ret); list_del_init(&obj->exec_list); @@ -127,7 +127,7 @@ found: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - if (drm_mm_scan_remove_block(obj->gtt_space)) { + if (drm_mm_scan_remove_block(&obj->gtt_space)) { list_move(&obj->exec_list, &eviction_list); drm_gem_object_reference(&obj->base); continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 76a4095452c7..242d0f9bb9e4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -629,28 +629,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - uintptr_t offset = (uintptr_t) obj->gtt_space; int ret; DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", - offset, obj->base.size); - - BUG_ON((offset & I915_GTT_RESERVED) != 0); - offset &= ~I915_GTT_RESERVED; - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL); - if (!obj->gtt_space) { - DRM_ERROR("Failed to preserve object at offset %lx\n", - offset); - continue; - } - 
obj->gtt_space->start = (unsigned long)offset; - obj->gtt_space->size = obj->base.size; + i915_gem_obj_ggtt_offset(obj), obj->base.size); + + WARN_ON(i915_gem_obj_ggtt_bound(obj)); ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, - obj->gtt_space); - if (ret) { + &obj->gtt_space); + if (ret) DRM_DEBUG_KMS("Reservation failed\n"); - kfree(obj->gtt_space); - obj->gtt_space = NULL; - } obj->has_global_gtt_mapping = 1; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 559f75450d35..08dd923c87d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -374,26 +374,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, * setting up the GTT space. The actual reservation will occur * later. */ + obj->gtt_space.start = gtt_offset; + obj->gtt_space.size = size; if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL); - if (!obj->gtt_space) { - DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n"); - goto unref_out; - } - - obj->gtt_space->start = gtt_offset; - obj->gtt_space->size = size; ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, - obj->gtt_space); + &obj->gtt_space); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - goto free_out; + goto unref_out; } - } else { - if (WARN_ON(gtt_offset & ~PAGE_MASK)) - DRM_DEBUG_KMS("Cannot preserve non page aligned offset\n"); - obj->gtt_space = - (struct drm_mm_node *)((uintptr_t)(I915_GTT_RESERVED | gtt_offset)); } obj->has_global_gtt_mapping = 1; @@ -403,9 +392,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, return obj; -free_out: - kfree(obj->gtt_space); - obj->gtt_space = NULL; unref_out: drm_gem_object_unreference(&obj->base); return NULL; -- cgit v1.2.3 From 4a33e48d0e121953342194b45d33dc752353d62b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 6 Jul 2013 12:52:05 +0200 Subject: drm/i915: fix dvo DPLL regression I've missed that intel_dvo_mode_set changes the dpll configuration. Hence when I've reworked the sequence to only enable the dpll in the crtc_enable callback in commit 66e3d5c09940d08d94b03e65b420fadaa7484318 Author: Daniel Vetter Date: Sun Jun 16 21:24:16 2013 +0200 drm/i915: move i9xx dpll enabling into crtc enable function that special DVO bit was lost. Some BSpec reading confirms that it's only needed for DVO encoders. Section 1.5.4, "DPLL A Control Register" for bit 30: "2X Clock Enable. When driving In non-gang DVO modes such as a connected flat panel or TV, a 2X" version of the clock is needed. When not using the 2X output it should be disabled. This bit cannot be set when driving the integrated LVDS port on devices such as Montara-GM." Fix this regression up. 
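For readers skimming the diff below, here is the bit-30 overloading from the quoted spec text in isolation: the same DPLL bit reads as "SDVO high speed" for SDVO/DP outputs but as "DVO 2X clock enable" for a DVO encoder, and must stay clear for integrated LVDS. This is a minimal standalone sketch of that selection logic only; the encoder-type enum and helper name are made up for illustration, and the real programming happens in i9xx_update_pll()/i8xx_update_pll() as shown in the patch.

/*
 * Standalone sketch only -- encoder names and helper are illustrative,
 * the real logic lives in i9xx_update_pll()/i8xx_update_pll() below.
 */
#include <stdint.h>
#include <stdio.h>

#define DPLL_SDVO_HIGH_SPEED	(1u << 30)	/* meaning for SDVO/DP outputs */
#define DPLL_DVO_2X_MODE	(1u << 30)	/* same bit, DVO 2X clock enable */

enum encoder_type { ENC_SDVO, ENC_DP, ENC_DVO, ENC_LVDS };

static uint32_t pick_dpll_bit30(enum encoder_type t)
{
	uint32_t dpll = 0;

	if (t == ENC_SDVO || t == ENC_DP)
		dpll |= DPLL_SDVO_HIGH_SPEED;	/* "high speed" interpretation */
	if (t == ENC_DVO)
		dpll |= DPLL_DVO_2X_MODE;	/* 2X clock for non-gang DVO */
	/* LVDS (and anything else) must leave bit 30 clear */

	return dpll;
}

int main(void)
{
	printf("DVO dpll bits: %#x\n", (unsigned)pick_dpll_bit30(ENC_DVO));
	return 0;
}
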
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=66516 Cc: Chris Wilson Reported-by: Chris Wilson Reviewed-by: Chris Wilson Partially-tested-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 3 ++- drivers/gpu/drm/i915/intel_display.c | 11 +++++++---- drivers/gpu/drm/i915/intel_dvo.c | 3 --- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9b51be800961..e9c50fab94af 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1126,7 +1126,8 @@ #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) -#define DPLL_DVO_HIGH_SPEED (1 << 30) +#define DPLL_SDVO_HIGH_SPEED (1 << 30) +#define DPLL_DVO_2X_MODE (1 << 30) #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a45bb92f35ad..41efd3519e35 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4544,10 +4544,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc, } if (is_sdvo) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ if (IS_PINEVIEW(dev)) @@ -4619,6 +4619,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) + dpll |= DPLL_DVO_2X_MODE; + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; @@ -5650,9 +5653,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; if (is_sdvo) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; if (intel_crtc->config.has_dp_encoder) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index eb2020eb2b7e..cbbc49dc03be 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; - int dpll_reg = DPLL(pipe); switch (dvo_reg) { case DVOA: @@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; - I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); - /*I915_WRITE(DVOB_SRCDIM, (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ -- cgit v1.2.3 From 5d536e2858ead64ea945552ec6a491f968c55888 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 6 Jul 2013 12:52:06 +0200 Subject: drm/i915: dvo needs a P2 divisor of 4 Section 1.5.4, "DPLL A Control Register" from Bspec about bit 23 "FPA0/A1 P2 Clock Divide": 0 = Divide by 2 1 = Divide by 4. 
This bit must be set in DVO non-gang mode So copy the current limits (which should be good for i8xx) and create a new set for dvo encoders. Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 41efd3519e35..01939513b8c2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -89,7 +89,7 @@ intel_fdi_link_freq(struct drm_device *dev) return 27; } -static const intel_limit_t intel_limits_i8xx_dvo = { +static const intel_limit_t intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 3, .max = 16 }, @@ -102,6 +102,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p2_slow = 4, .p2_fast = 2 }, }; +static const intel_limit_t intel_limits_i8xx_dvo = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 930000, .max = 1400000 }, + .n = { .min = 3, .max = 16 }, + .m = { .min = 96, .max = 140 }, + .m1 = { .min = 18, .max = 26 }, + .m2 = { .min = 6, .max = 16 }, + .p = { .min = 4, .max = 128 }, + .p1 = { .min = 2, .max = 33 }, + .p2 = { .dot_limit = 165000, + .p2_slow = 4, .p2_fast = 4 }, +}; + static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, @@ -410,8 +423,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; - else + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) limit = &intel_limits_i8xx_dvo; + else + limit = &intel_limits_i8xx_dac; } return limit; } -- cgit v1.2.3 From 34b9674c786c73e5472e8b98a729bcdde9197859 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 20:49:44 +0200 Subject: drm/i915: convert debugfs creation/destruction to table At least for the common cases where we only need special file operations. The forcewake file is still rather more special. v2: Fix up the debugfs unregister code. v3: Actually squash in the right fixup. 
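The conversion below boils the repeated i915_debugfs_create() calls down to a table walked in a loop. As a minimal standalone sketch of that table-driven registration pattern (the struct, entry names and printf are stand-ins for the real debugfs API and file_operations), adding a new file becomes a one-line table edit:

/*
 * Table-driven registration in miniature; names are placeholders.
 */
#include <stdio.h>
#include <stddef.h>

struct debug_file {
	const char *name;
	int (*create)(const char *name);
};

static int create_stub(const char *name)
{
	printf("registering %s\n", name);
	return 0;
}

static const struct debug_file debug_files[] = {
	{ "example_wedged", create_stub },
	{ "example_max_freq", create_stub },
	/* adding another file is now a one-line table edit */
};

static int register_all(void)
{
	size_t i;
	int ret;

	for (i = 0; i < sizeof(debug_files) / sizeof(debug_files[0]); i++) {
		ret = debug_files[i].create(debug_files[i].name);
		if (ret)
			return ret;	/* bail out like the loop in the patch */
	}
	return 0;
}

int main(void)
{
	return register_all();
}
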
Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 94 ++++++++++++------------------------- 1 file changed, 30 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 396387ed207a..d4138124d993 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2375,61 +2375,35 @@ static struct drm_info_list i915_debugfs_list[] = { }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) +struct i915_debugfs_files { + const char *name; + const struct file_operations *fops; +} i915_debugfs_files[] = { + {"i915_wedged", &i915_wedged_fops}, + {"i915_max_freq", &i915_max_freq_fops}, + {"i915_min_freq", &i915_min_freq_fops}, + {"i915_cache_sharing", &i915_cache_sharing_fops}, + {"i915_ring_stop", &i915_ring_stop_fops}, + {"i915_gem_drop_caches", &i915_drop_caches_fops}, + {"i915_error_state", &i915_error_state_fops}, + {"i915_next_seqno", &i915_next_seqno_fops}, +}; + int i915_debugfs_init(struct drm_minor *minor) { - int ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_wedged", - &i915_wedged_fops); - if (ret) - return ret; + int ret, i; ret = i915_forcewake_create(minor->debugfs_root, minor); if (ret) return ret; - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_max_freq", - &i915_max_freq_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_min_freq", - &i915_min_freq_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_cache_sharing", - &i915_cache_sharing_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_ring_stop", - &i915_ring_stop_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_gem_drop_caches", - &i915_drop_caches_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_error_state", - &i915_error_state_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_next_seqno", - &i915_next_seqno_fops); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { + ret = i915_debugfs_create(minor->debugfs_root, minor, + i915_debugfs_files[i].name, + i915_debugfs_files[i].fops); + if (ret) + return ret; + } return drm_debugfs_create_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, @@ -2438,26 +2412,18 @@ int i915_debugfs_init(struct drm_minor *minor) void i915_debugfs_cleanup(struct drm_minor *minor) { + int i; + drm_debugfs_remove_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops, - 1, minor); + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { + struct drm_info_list 
*info_list = + (struct drm_info_list *) i915_debugfs_files[i].fops; + + drm_debugfs_remove_files(info_list, 1, minor); + } } #endif /* CONFIG_DEBUG_FS */ -- cgit v1.2.3 From eaba1b8f3379b5d100bd146b9a41d28348bdfd09 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2013 12:28:35 +0100 Subject: drm/i915: Verify that our stolen memory doesn't conflict Sanity check that the memory region found through the Graphics Base of Stolen Memory is reserved and hidden from the rest of the system through the use of the resource API. v2: "Graphics Stolen Memory" is such a more bodacious name than the lame "i915 stolen", and convert to using devres for automagical cleanup of the resource. (danvet) Signed-off-by: Chris Wilson Cc: Daniel Vetter [danvet: Dump proper hexcodes.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 08dd923c87d8..a3ce2cbb9221 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -46,6 +46,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev_priv->bridge_dev; + struct resource *r; u32 base; /* On the machines I have tested the Graphics Base of Stolen Memory @@ -88,6 +89,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) #endif } + if (base == 0) + return 0; + + /* Verify that nothing else uses this physical address. Stolen + * memory should be reserved by the BIOS and hidden from the + * kernel. So if the region is already marked as busy, something + * is seriously wrong. + */ + r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, + "Graphics Stolen Memory"); + if (r == NULL) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; + } + return base; } -- cgit v1.2.3 From 36c0cc616e518bfc2b685bed7fb3243d1242eca4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 9 Jul 2013 14:44:26 +0200 Subject: drm/i915: clean up media reset on gm45 Originally I've thought that this fixes up the reset issues on my gm45, but that was just a red herring due to b0rked testing. Still I much prefer writing the right values (all other fields are reserved) instead of potentially dragging gunk around. Hence also clear the register to 0 after a reset. Note that Cspec is a bit confused and doesn't explicitly say that all the other bits in this register are "reserved, mbz" like usually. Instead they're marked as "r/o, default value = 0" which semantically amounts to the same thing. v2: Stop claiming this fixes anything and return 0 if successful instead of stack garbage. v3: Pimp the commit message to explain exactly why I think the docs allow us to ditch the rmw cycle, spurred by a discussion with Chris. 
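A rough userspace model of the sequence described above: plain writes of the wanted reset domains with no read-modify-write, a wait between the two domains, and a trailing write of 0. The bit positions and the self-clearing "hardware" below are placeholders for illustration, not the real GDRST layout or PCI config accessors.

/*
 * Sketch only: placeholder bits, simulated self-clearing hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define GRDOM_RESET_ENABLE	(1u << 0)	/* placeholder positions */
#define GRDOM_RENDER		(1u << 1)
#define GRDOM_MEDIA		(1u << 2)

static uint8_t gdrst;				/* stands in for the config byte */

static int reset_complete(void)
{
	gdrst &= (uint8_t)~GRDOM_RESET_ENABLE;	/* pretend the hw self-clears */
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int do_reset(void)
{
	gdrst = GRDOM_RENDER | GRDOM_RESET_ENABLE;	/* write, don't read back */
	if (!reset_complete())
		return -1;

	gdrst = GRDOM_MEDIA | GRDOM_RESET_ENABLE;
	if (!reset_complete())
		return -1;

	gdrst = 0;					/* leave no gunk behind */
	return 0;
}

int main(void)
{
	printf("reset %s\n", do_reset() ? "failed" : "ok");
	return 0;
}
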
Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 33cb97388fc9..ed9262c42b7b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -798,28 +798,29 @@ static int i965_reset_complete(struct drm_device *dev) static int i965_do_reset(struct drm_device *dev) { int ret; - u8 gdrst; /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. */ - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); pci_write_config_byte(dev->pdev, I965_GDRST, - gdrst | GRDOM_RENDER | - GRDOM_RESET_ENABLE); + GRDOM_RENDER | GRDOM_RESET_ENABLE); ret = wait_for(i965_reset_complete(dev), 500); if (ret) return ret; /* We can't reset render&media without also resetting display ... */ - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); pci_write_config_byte(dev->pdev, I965_GDRST, - gdrst | GRDOM_MEDIA | - GRDOM_RESET_ENABLE); + GRDOM_MEDIA | GRDOM_RESET_ENABLE); - return wait_for(i965_reset_complete(dev), 500); + ret = wait_for(i965_reset_complete(dev), 500); + if (ret) + return ret; + + pci_write_config_byte(dev->pdev, I965_GDRST, 0); + + return 0; } static int ironlake_do_reset(struct drm_device *dev) -- cgit v1.2.3 From 897f9ed00a906dd3edc69f64d590bba87c45617b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 9 Jul 2013 14:44:27 +0200 Subject: drm/i915: WARN if the bios reserved range is bigger than stolen size v2: Bail out if we hit the WARN_ON to avoid fallout later on. Spotted by Chris Wilson. Suggested-by: Chris Wilson Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a3ce2cbb9221..76c3b8699168 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -215,6 +215,9 @@ int i915_gem_init_stolen(struct drm_device *dev) if (IS_VALLEYVIEW(dev)) bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ + if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)) + return 0; + /* Basic memrange allocator for stolen space */ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - bios_reserved); -- cgit v1.2.3 From 885b012008583ba70e5537d479454450f5bdfa09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 19:21:38 +0300 Subject: drm/i915: Fix VLV DP RBR/HDMI/DAC PLL LPF coefficients MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I just got confirmation that we're using some old values for the PLL LPF coefficients for DP RBR/HDMI/DAC on VLV. The VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_9 document lists both values by mistake, and apparently we had picked the wrong one. Change the coefficients to the recommended values. Changing the value doesn't appear to destabilize the VGA output picture even with my sensitive HP ZR24w display. Also HDMI output to my TV still works fine. 
Signed-off-by: Ville Syrjälä Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 01939513b8c2..d2b76b695349 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4465,7 +4465,7 @@ static void vlv_update_pll(struct intel_crtc *crtc) intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), - 0x005f0021); + 0x009f0003); else vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 0x00d0000f); -- cgit v1.2.3 From db1b76ca6a79c774074ae87bee7afc0825a478f5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 9 Jul 2013 16:51:37 +0200 Subject: drm/i915: don't frob mm.suspended when not using ums In kernel modeset driver mode we're in full control of the chip, always. So there's no need at all to set mm.suspended in i915_gem_idle. Hence move that out into the leavevt ioctl. Since i915_gem_idle doesn't suspend gem any more we can also drop the re-enabling for KMS in the thaw function. Also clean up the handling of mm.suspend at driver load by coalescing all the assignments. Stumbled over while reading through our resume code for unrelated reasons. v2: Shovel mm.suspended into the (newly created) ums dungeon as suggested by Chris Wilson. The plan is that once we've completely stopped relying on the register save/restore code we could shovel even that in there. v3: Improve the locking for the entervt/leavevt ioctls a bit by moving the dev->struct_mutex locking outside of i915_gem_idle. Also don't clear dev_priv->ums.mm_suspended for the kms case, we allocate it with kzalloc. Both suggested by Chris Wilson. Cc: Chris Wilson Reviewed-by: Chris Wilson (v2) Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 13 +++------- drivers/gpu/drm/i915/i915_drv.c | 11 +++++--- drivers/gpu/drm/i915/i915_drv.h | 24 ++++++++++-------- drivers/gpu/drm/i915/i915_gem.c | 40 ++++++++++++++++++------------ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 5 files changed, 50 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0e2214236083..bece9973e1b5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev) /* Always safe in the mode setting case. */ /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; - if (INTEL_INFO(dev)->num_pipes == 0) { - dev_priv->mm.suspended = 0; + if (INTEL_INFO(dev)->num_pipes == 0) return 0; - } ret = intel_fbdev_init(dev); if (ret) @@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev) drm_kms_helper_poll_init(dev); - /* We're off and running w/KMS */ - dev_priv->mm.suspended = 0; - return 0; cleanup_gem: @@ -1629,9 +1624,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_gem_unload; } - /* Start out suspended */ - dev_priv->mm.suspended = 1; - if (HAS_POWER_WELL(dev)) i915_init_power_well(dev); @@ -1641,6 +1633,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) DRM_ERROR("failed to init modeset\n"); goto out_gem_unload; } + } else { + /* Start out suspended in ums mode. 
*/ + dev_priv->ums.mm_suspended = 1; } i915_setup_sysfs(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ed9262c42b7b..0485f435eeea 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -556,7 +556,11 @@ static int i915_drm_freeze(struct drm_device *dev) /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - int error = i915_gem_idle(dev); + int error; + + mutex_lock(&dev->struct_mutex); + error = i915_gem_idle(dev); + mutex_unlock(&dev->struct_mutex); if (error) { dev_err(&dev->pdev->dev, "GEM idle failed, resume might fail\n"); @@ -661,7 +665,6 @@ static int __i915_drm_thaw(struct drm_device *dev) intel_init_pch_refclk(dev); mutex_lock(&dev->struct_mutex); - dev_priv->mm.suspended = 0; error = i915_gem_init_hw(dev); mutex_unlock(&dev->struct_mutex); @@ -961,11 +964,11 @@ int i915_reset(struct drm_device *dev) * switched away). */ if (drm_core_check_feature(dev, DRIVER_MODESET) || - !dev_priv->mm.suspended) { + !dev_priv->ums.mm_suspended) { struct intel_ring_buffer *ring; int i; - dev_priv->mm.suspended = 0; + dev_priv->ums.mm_suspended = 0; i915_gem_init_swizzling(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 993fd2c8a457..c81ac155f5b9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -814,6 +814,18 @@ struct i915_dri1_state { uint32_t counter; }; +struct i915_ums_state { + /** + * Flag if the X Server, and thus DRM, is not currently in + * control of the device. + * + * This is set between LeaveVT and EnterVT. It needs to be + * replaced with a semaphore. It also needs to be + * transitioned away from for kernel modesetting. + */ + int mm_suspended; +}; + struct intel_l3_parity { u32 *remap_info; struct work_struct error_work; @@ -884,16 +896,6 @@ struct i915_gem_mm { */ bool interruptible; - /** - * Flag if the X Server, and thus DRM, is not currently in - * control of the device. - * - * This is set between LeaveVT and EnterVT. It needs to be - * replaced with a semaphore. It also needs to be - * transitioned away from for kernel modesetting. - */ - int suspended; - /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ @@ -1187,6 +1189,8 @@ typedef struct drm_i915_private { /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; + /* Old ums support infrastructure, same warning applies. 
*/ + struct i915_ums_state ums; } drm_i915_private_t; /* Iterate over initialised rings */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 339404937ab9..20b10a0fa452 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2082,7 +2082,7 @@ int __i915_add_request(struct intel_ring_buffer *ring, trace_i915_gem_request_add(ring, request->seqno); ring->outstanding_lazy_request = 0; - if (!dev_priv->mm.suspended) { + if (!dev_priv->ums.mm_suspended) { if (i915_enable_hangcheck) { mod_timer(&dev_priv->gpu_error.hangcheck_timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); @@ -2398,7 +2398,7 @@ i915_gem_retire_work_handler(struct work_struct *work) idle &= list_empty(&ring->request_list); } - if (!dev_priv->mm.suspended && !idle) + if (!dev_priv->ums.mm_suspended && !idle) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, round_jiffies_up_relative(HZ)); if (idle) @@ -3992,9 +3992,7 @@ i915_gem_idle(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev->struct_mutex); - - if (dev_priv->mm.suspended) { + if (dev_priv->ums.mm_suspended) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4012,18 +4010,11 @@ i915_gem_idle(struct drm_device *dev) i915_gem_reset_fences(dev); - /* Hack! Don't let anybody do execbuf while we don't control the chip. - * We need to replace this with a semaphore, or something. - * And not confound mm.suspended! - */ - dev_priv->mm.suspended = 1; del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); - mutex_unlock(&dev->struct_mutex); - /* Cancel the retire work handler, which should be idle now. */ cancel_delayed_work_sync(&dev_priv->mm.retire_work); @@ -4233,7 +4224,7 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (drm_core_check_feature(dev, DRIVER_MODESET)) @@ -4245,7 +4236,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - dev_priv->mm.suspended = 0; + dev_priv->ums.mm_suspended = 0; ret = i915_gem_init_hw(dev); if (ret != 0) { @@ -4265,7 +4256,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, cleanup_ringbuffer: mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); - dev_priv->mm.suspended = 1; + dev_priv->ums.mm_suspended = 1; mutex_unlock(&dev->struct_mutex); return ret; @@ -4275,11 +4266,26 @@ int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; drm_irq_uninstall(dev); - return i915_gem_idle(dev); + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_idle(dev); + + /* Hack! Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + * And not confound ums.mm_suspended! 
+ */ + if (ret != 0) + dev_priv->ums.mm_suspended = 1; + mutex_unlock(&dev->struct_mutex); + + return ret; } void @@ -4290,9 +4296,11 @@ i915_gem_lastclose(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + mutex_lock(&dev->struct_mutex); ret = i915_gem_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); + mutex_unlock(&dev->struct_mutex); } static void diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5aeb447ead6b..64eda4463b70 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -973,7 +973,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto pre_mutex_err; - if (dev_priv->mm.suspended) { + if (dev_priv->ums.mm_suspended) { mutex_unlock(&dev->struct_mutex); ret = -EBUSY; goto pre_mutex_err; -- cgit v1.2.3 From 12f56f51925dfca7caf079a6e6ccd22b63cdb39a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 10 Jul 2013 14:27:31 +0200 Subject: drm/i915: remove unused members from drm_i915_private Signed-off-by: Maarten Lankhorst Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c81ac155f5b9..f2f1be110101 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1102,8 +1102,6 @@ typedef struct drm_i915_private { } backlight; /* LVDS info */ - struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ - struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ bool no_aux_handshake; struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ -- cgit v1.2.3 From fee884ed285a110b665c00b07b134cd2616122bc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:21 +0200 Subject: drm/i915: extract ibx_display_interrupt_update This way all changes to SDEIMR all go through the same function, with the exception of the (single-threaded) setup/teardown code. For paranoia again add an assert_spin_locked. v2: For even more paranoia also sprinkle a spinlock assert over cpt_can_enable_serr_int since we need to have that one there, too. v3: Fix the logic of interrupt enabling, add enable/disable macros for the simple cases in the fifo code and add a comment. All requested by Paulo. 
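The heart of the helper introduced below is the two-mask update: clear every bit named in interrupt_mask, then re-set the subset that should stay disabled. A minimal standalone sketch of that pattern follows, with a plain variable standing in for SDEIMR and no locking (the real helper additionally asserts that dev_priv->irq_lock is held):

/*
 * Two-mask IMR update pattern in isolation; "imr" replaces the register.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t imr = 0xffffffff;	/* a set bit masks (disables) a source */

static void display_interrupt_update(uint32_t interrupt_mask,
				     uint32_t enabled_irq_mask)
{
	imr &= ~interrupt_mask;				/* unmask everything named... */
	imr |= ~enabled_irq_mask & interrupt_mask;	/* ...then re-mask the disabled subset */
}

#define enable_display_interrupt(bits)	display_interrupt_update((bits), (bits))
#define disable_display_interrupt(bits)	display_interrupt_update((bits), 0)

int main(void)
{
	enable_display_interrupt((1u << 3) | (1u << 0));
	disable_display_interrupt(1u << 0);
	printf("IMR = %#x\n", (unsigned)imr);	/* bit 3 enabled, bit 0 masked again */
	return 0;
}
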
Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 51 +++++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4aedd387c8b5..80b88c868cd2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -128,6 +128,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) enum pipe pipe; struct intel_crtc *crtc; + assert_spin_locked(&dev_priv->irq_lock); + for_each_pipe(pipe) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); @@ -170,6 +172,30 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, } } +/** + * ibx_display_interrupt_update - update SDEIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t sdeimr = I915_READ(SDEIMR); + sdeimr &= ~interrupt_mask; + sdeimr |= (~enabled_irq_mask & interrupt_mask); + + assert_spin_locked(&dev_priv->irq_lock); + + I915_WRITE(SDEIMR, sdeimr); + POSTING_READ(SDEIMR); +} +#define ibx_enable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), (bits)) +#define ibx_disable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), 0) + static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, bool enable) { @@ -179,11 +205,9 @@ static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, SDE_TRANSB_FIFO_UNDER; if (enable) - I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit); + ibx_enable_display_interrupt(dev_priv, bit); else - I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit); - - POSTING_READ(SDEIMR); + ibx_disable_display_interrupt(dev_priv, bit); } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, @@ -200,12 +224,10 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, SERR_INT_TRANS_B_FIFO_UNDERRUN | SERR_INT_TRANS_C_FIFO_UNDERRUN); - I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT); + ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); } else { - I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT); + ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); } - - POSTING_READ(SDEIMR); } /** @@ -2652,22 +2674,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; - u32 mask = ~I915_READ(SDEIMR); - u32 hotplug; + u32 hotplug_irqs, hotplug, enabled_irqs = 0; if (HAS_PCH_IBX(dev)) { - mask &= ~SDE_HOTPLUG_MASK; + hotplug_irqs = SDE_HOTPLUG_MASK; list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_ibx[intel_encoder->hpd_pin]; + enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; } else { - mask &= ~SDE_HOTPLUG_MASK_CPT; + hotplug_irqs = SDE_HOTPLUG_MASK_CPT; list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_cpt[intel_encoder->hpd_pin]; + enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; } - I915_WRITE(SDEIMR, ~mask); + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); /* * Enable digital 
hotplug on the PCH, and configure the DP short pulse -- cgit v1.2.3 From 1dd246fb165819d31119e988c2887934c255fadc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 08:30:23 +0200 Subject: drm/i915: improve SERR_INT clearing for fifo underrun reporting The current code won't report any fifo underruns on cpt if just one pipe has fifo underrun reporting disabled. We can't enable the interrupts, but we can still check the per-transcoder bits and so report the underrun delayed if: - We always clear the transcoder's bit (and none of the other bits) when enabling. - We check the transcoder's bit after disabling (to avoid racing with the interrupt handler). v2: I've forgotten to actually remove the old SERR_INT clearing. v3: Use transcoder_name as suggested by Paulo Zanoni. Paulo also noticed a logic bug: When an underrun interrupt fires we report it both in the interrupt handler and when checking for underruns when disabling it in cpt_set_fifo_underrun_reporting. But that second check is only required if the interrupt is disabled and we're switching of underrun reporting (e.g. because we're disabling the crtc). Hence check for that condition. At first I wanted to rework the code to pass that bit of information from the uppper functions down to cpt_set_fifo_underrun_reporting. But that turned out too messy. Hence the quick&dirty check whether the south error interrupt source is masked off or not. v4: Streamline the control flow a bit. v5: s/pipe/pch transcoder/ in the dmesg output, suggested by Paulo. v6: Review from Paulo: - Reorder the was_enabled assignment to only read the register when we need it. Also add a comment that we need to do that before updating the register. - s/%i/%c/ fix for the debug output. - Fix the checkpath complaint in the SERR_INT_TRANS_FIFO_UNDERRUN #define. v7: Hopefully put that elusive SERR hunk back into this patch, spotted by Paulo. Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 17 +++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 80b88c868cd2..dd9d9997e90b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -217,16 +217,25 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; if (enable) { + I915_WRITE(SERR_INT, + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + if (!cpt_can_enable_serr_int(dev)) return; - I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN | - SERR_INT_TRANS_B_FIFO_UNDERRUN | - SERR_INT_TRANS_C_FIFO_UNDERRUN); - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); } else { + uint32_t tmp = I915_READ(SERR_INT); + bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT); + + /* Change the state _after_ we've read out the current one. 
*/ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); + + if (!was_enabled && + (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) { + DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n", + transcoder_name(pch_transcoder)); + } } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e9c50fab94af..7e2684fe4d3a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3882,6 +3882,7 @@ #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) +#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) /* digital port hotplug */ #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ -- cgit v1.2.3 From 7336df6512440a494d3a705dfc6a883a42733c8f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 9 Jul 2013 22:59:16 +0200 Subject: drm/i915: improve GEN7_ERR_INT clearing for fifo underrun reporting Same treatment as for SERR_INT: If we clear only the bit for the pipe we're enabling (but unconditionally) then we can always check for possible underruns after having disabled the interrupt. That way pipe underruns won't be lost, but at worst only get reported in a delayed fashion. v2: The same logic bug as in the SERR handling change also existed here. The same bugfix of only reporting missed underruns when the error interrupt was masked applies, too. v3: Do the same fixes as for the SERR handling that Paulo suggested in his review: - s/%i/%c/ fix in the debug output - move the DE_ERR_INT_IVB read into the respective if block Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni [danvet: Fix up the checkpatch bikeshed Paulo noticed.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 20 +++++++++++++------- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dd9d9997e90b..76e977b0070e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -154,21 +154,27 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, } static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - bool enable) + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; - if (enable) { + I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + if (!ivb_can_enable_err_int(dev)) return; - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A | - ERR_INT_FIFO_UNDERRUN_B | - ERR_INT_FIFO_UNDERRUN_C); - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); } else { + bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); + + /* Change the state _after_ we've read out the current one. 
*/ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + + if (!was_enabled && + (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { + DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", + pipe_name(pipe)); + } } } @@ -274,7 +280,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, if (IS_GEN5(dev) || IS_GEN6(dev)) ironlake_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN7(dev)) - ivybridge_set_fifo_underrun_reporting(dev, enable); + ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); done: spin_unlock_irqrestore(&dev_priv->irq_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7e2684fe4d3a..dc3d6a74f391 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -681,6 +681,7 @@ #define ERR_INT_FIFO_UNDERRUN_C (1<<6) #define ERR_INT_FIFO_UNDERRUN_B (1<<3) #define ERR_INT_FIFO_UNDERRUN_A (1<<0) +#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) #define FPGA_DBG 0x42300 #define FPGA_DBG_RM_NOCLAIM (1<<31) -- cgit v1.2.3 From de28075d5bb3e1e9f92d19da214b6a96f544b66d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:24 +0200 Subject: drm/i915: kill lpt pch transcoder->crtc mapping code for fifo underruns It's racy: There's no guarantee that we won't walk this code (due to a pch fifo underrun interrupt) while someone is changing the pointers around. The only reason we do this is to use the righ crtc for the pch fifo underrun accounting. But we never expose this to userspace, so essentially no one really cares if we use the "wrong" crtc. So let's just rip it out. With this patch fifo underrun code will always use crtc A for tracking underruns on the (only) pch transcoder on LPT. v2: Add a big comment explaining what's going on. Requested by Paulo. v3: Fixup spelling in comment as spotted by Paulo. Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 40 +++++++++++++++------------------------- 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 76e977b0070e..69957f889432 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -202,13 +202,13 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, #define ibx_disable_display_interrupt(dev_priv, bits) \ ibx_display_interrupt_update((dev_priv), (bits), 0) -static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, +static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, + enum transcoder pch_transcoder, bool enable) { - struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : - SDE_TRANSB_FIFO_UNDER; + uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) ibx_enable_display_interrupt(dev_priv, bit); @@ -306,29 +306,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe p; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); unsigned long flags; bool ret; - if (HAS_PCH_LPT(dev)) { - crtc = NULL; - for_each_pipe(p) { - struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p]; - if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) { - crtc = c; - break; - } - } - if (!crtc) { - DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n"); - return false; - } - } else { - crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; - } - intel_crtc = to_intel_crtc(crtc); + /* + * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT + * has only one pch transcoder A that all pipes can use. To avoid racy + * pch transcoder -> pipe lookups from interrupt code simply store the + * underrun statistics in crtc A. Since we never expose this anywhere + * nor use it outside of the fifo underrun code here using the "wrong" + * crtc on LPT won't cause issues. + */ spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -340,7 +330,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, intel_crtc->pch_fifo_underrun_disabled = !enable; if (HAS_PCH_IBX(dev)) - ibx_set_fifo_underrun_reporting(intel_crtc, enable); + ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); else cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); -- cgit v1.2.3 From d0ecd7e221c87514b1eca84b11fee1e262f5d816 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:25 +0200 Subject: drm/i915: irq handlers don't need interrupt-safe spinlocks Since we only have one interrupt handler and interrupt handlers are non-reentrant. To drive the point really home give them all an _irq_handler suffix. This is a tiny micro-optimization but even more important it makes it clearer what locking we actually need. And in case someone screws this up: lockdep will catch hardirq vs. other context deadlocks. v2: Fix up compile fail. 
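As an illustration of the rule this change relies on, here is a schematic sketch rather than i915 code; the names (example_lock, example_irq_handler, example_events) are made up. With a single, non-reentrant interrupt handler, code running in that handler can take a spinlock with plain spin_lock(), while process-context users of the same lock still need the interrupt-disabling variants so the handler cannot preempt them while the lock is held:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static u32 example_events;		/* hypothetical shared state */

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	/* Hardirq context: the handler cannot be re-entered, plain spin_lock() suffices. */
	spin_lock(&example_lock);
	example_events++;
	spin_unlock(&example_lock);

	return IRQ_HANDLED;
}

static void example_update_from_process_context(u32 val)
{
	unsigned long flags;

	/* Process context: must keep the interrupt handler out while holding the lock. */
	spin_lock_irqsave(&example_lock, flags);
	example_events = val;
	spin_unlock_irqrestore(&example_lock, flags);
}

If the process-context side mistakenly used plain spin_lock(), lockdep's hardirq tracking would flag the potential deadlock, which is the safety net referred to above.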
Reviewed-by: Paulo Zanoni Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 42 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 69957f889432..a7c0a730a6e1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -656,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void ironlake_handle_rps_change(struct drm_device *dev) +static void ironlake_rps_change_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; - unsigned long flags; - spin_lock_irqsave(&mchdev_lock, flags); + spin_lock(&mchdev_lock); I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -691,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev) if (ironlake_set_drps(dev, new_delay)) dev_priv->ips.cur_delay = new_delay; - spin_unlock_irqrestore(&mchdev_lock, flags); + spin_unlock(&mchdev_lock); return; } @@ -835,18 +834,17 @@ static void ivybridge_parity_work(struct work_struct *work) kfree(parity_event[1]); } -static void ivybridge_handle_parity_error(struct drm_device *dev) +static void ivybridge_parity_error_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long flags; if (!HAS_L3_GPU_CACHE(dev)) return; - spin_lock_irqsave(&dev_priv->irq_lock, flags); + spin_lock(&dev_priv->irq_lock); dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); } @@ -872,15 +870,13 @@ static void snb_gt_irq_handler(struct drm_device *dev, } if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) - ivybridge_handle_parity_error(dev); + ivybridge_parity_error_irq_handler(dev); } /* Legacy way of handling PM interrupts */ -static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, - u32 pm_iir) +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, + u32 pm_iir) { - unsigned long flags; - /* * IIR bits should never already be set because IMR should * prevent an interrupt from being shown in IIR. The warning @@ -891,11 +887,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, * The mask bit in IMR is cleared by dev_priv->rps.work. */ - spin_lock_irqsave(&dev_priv->rps.lock, flags); + spin_lock(&dev_priv->rps.lock); dev_priv->rps.pm_iir |= pm_iir; I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock(&dev_priv->rps.lock); queue_work(dev_priv->wq, &dev_priv->rps.work); } @@ -959,7 +955,7 @@ static void dp_aux_irq_handler(struct drm_device *dev) wake_up_all(&dev_priv->gmbus_wait_queue); } -/* Unlike gen6_queue_rps_work() from which this function is originally derived, +/* Unlike gen6_rps_irq_handler() from which this function is originally derived, * we must be able to deal with other PM interrupts. This is complicated because * of the way in which we use the masks to defer the RPS work (which for * posterity is necessary because of forcewake). 
@@ -967,9 +963,7 @@ static void dp_aux_irq_handler(struct drm_device *dev) static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - unsigned long flags; - - spin_lock_irqsave(&dev_priv->rps.lock, flags); + spin_lock(&dev_priv->rps.lock); dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; if (dev_priv->rps.pm_iir) { I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); @@ -978,7 +972,7 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, /* TODO: if queue_work is slow, move it out of the spinlock */ queue_work(dev_priv->wq, &dev_priv->rps.work); } - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock(&dev_priv->rps.lock); if (pm_iir & ~GEN6_PM_RPS_EVENTS) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) @@ -1060,7 +1054,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gmbus_irq_handler(dev); if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(GEN6_PMIIR, pm_iir); @@ -1298,7 +1292,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) if (IS_HASWELL(dev)) hsw_pm_irq_handler(dev_priv, pm_iir); else if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; } @@ -1415,10 +1409,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) } if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_handle_rps_change(dev); + ironlake_rps_change_irq_handler(dev); if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(DEIIR, de_iir); -- cgit v1.2.3 From 41a05a3a5cdc5d731014588b9a24759af1804d48 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:26 +0200 Subject: drm/i915: streamline hsw_pm_irq_handler The if (pm_iir & ~GEN6_PM_RPS_EVENTS) check was redunandant. Otoh adding a check for rps events allows us to avoid the spinlock grabbing for VECS interrupts. v2: Drop misplaced hunk which now moved to the right patch. Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a7c0a730a6e1..d4af11541287 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -963,25 +963,23 @@ static void dp_aux_irq_handler(struct drm_device *dev) static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - spin_lock(&dev_priv->rps.lock); - dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - if (dev_priv->rps.pm_iir) { + if (pm_iir & GEN6_PM_RPS_EVENTS) { + spin_lock(&dev_priv->rps.lock); + dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); /* never want to mask useful interrupts. 
(also posting read) */ WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); /* TODO: if queue_work is slow, move it out of the spinlock */ queue_work(dev_priv->wq, &dev_priv->rps.work); + spin_unlock(&dev_priv->rps.lock); } - if (pm_iir & ~GEN6_PM_RPS_EVENTS) { - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { - DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); - i915_handle_error(dev_priv->dev, false); - } + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { + DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); + i915_handle_error(dev_priv->dev, false); } } -- cgit v1.2.3 From 2adbee62e00d869a30cb93ea2269e5ea26a9bbc4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:27 +0200 Subject: drm/i915: queue work outside spinlock in hsw_pm_irq_handler And kill the comment about it. Queueing work is a barrier type event, no amount of locking will help in ordering things (as long as we queue the work after having updated all relevant data structures). Also, queue_work itself works as a sufficient memory barrier. Again on the surface this is just a tiny micro-optimization to reduce the hold-time of dev_priv->irq_lock. But the better reason is that it reduces superficial locking and so makes it clearer what we actually need for correctness. Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d4af11541287..04861995fe1b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -969,9 +969,9 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); /* never want to mask useful interrupts. (also posting read) */ WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); - /* TODO: if queue_work is slow, move it out of the spinlock */ - queue_work(dev_priv->wq, &dev_priv->rps.work); spin_unlock(&dev_priv->rps.lock); + + queue_work(dev_priv->wq, &dev_priv->rps.work); } if (pm_iir & PM_VEBOX_USER_INTERRUPT) -- cgit v1.2.3 From 59cdb63d529c81fc8ac0620ad50f29d5fb4411c9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:28 +0200 Subject: drm/i915: kill dev_priv->rps.lock Now that the rps interrupt locking isn't clearly separated (at least conceptually) from all the other interrupt locking, having a different lock stopped making sense: It protects much more than just the rps workqueue it started out with. But with the addition of VECS the separation started to blur and resulted in some more complex locking for the ring interrupt refcount. With this we can (again) unify the ringbuffer irq refcounts without causing a massive confusion, but that's for the next patch. v2: Explain better why the rps.lock once made sense and why no longer, requested by Ben.
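To make the resulting scheme concrete, here is a schematic sketch with made-up names (example_private, example_pm_irq_handler), not the i915 code: a single lock in the driver-private structure protects both the deferred PM interrupt bits and the work item that consumes them; the interrupt handler takes it with plain spin_lock(), the worker uses spin_lock_irq() since it runs in process context, and the work is queued only after the shared state has been updated and the lock dropped, as argued in the patch above.

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_private {
	spinlock_t irq_lock;		/* protects pm_pending and pm_work */
	u32 pm_pending;
	struct work_struct pm_work;
};

static void example_pm_irq_handler(struct example_private *priv, u32 iir)
{
	/* Called from the single, non-reentrant interrupt handler. */
	spin_lock(&priv->irq_lock);
	priv->pm_pending |= iir;
	spin_unlock(&priv->irq_lock);

	/* Queued after the update and outside the lock; no extra ordering needed. */
	schedule_work(&priv->pm_work);
}

static void example_pm_work(struct work_struct *work)
{
	struct example_private *priv =
		container_of(work, struct example_private, pm_work);
	u32 pending;

	/* Process context: block the interrupt handler while touching pm_pending. */
	spin_lock_irq(&priv->irq_lock);
	pending = priv->pm_pending;
	priv->pm_pending = 0;
	spin_unlock_irq(&priv->irq_lock);

	/* ... act on 'pending' here without holding the lock ... */
	(void)pending;
}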
Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 1 - drivers/gpu/drm/i915/i915_drv.h | 8 ++++---- drivers/gpu/drm/i915/i915_irq.c | 12 ++++++------ drivers/gpu/drm/i915/intel_pm.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++---- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 6 files changed, 23 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index bece9973e1b5..6ce903306320 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1607,7 +1607,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - spin_lock_init(&dev_priv->rps.lock); spin_lock_init(&dev_priv->backlight.lock); mutex_init(&dev_priv->dpio_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f2f1be110101..846500a8586c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -743,12 +743,12 @@ struct i915_suspend_saved_registers { }; struct intel_gen6_power_mgmt { + /* work and pm_iir are protected by dev_priv->irq_lock */ struct work_struct work; - struct delayed_work vlv_work; u32 pm_iir; - /* lock - irqsave spinlock that protectects the work_struct and - * pm_iir. */ - spinlock_t lock; + + /* On vlv we need to manually drop to Vmin with a delayed work. */ + struct delayed_work vlv_work; /* The below variables an all the rps hw state are protected by * dev->struct mutext. */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 04861995fe1b..f4d5569834c2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -719,13 +719,13 @@ static void gen6_pm_rps_work(struct work_struct *work) u32 pm_iir, pm_imr; u8 new_delay; - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); /* Make sure not to corrupt PMIMR state used by ringbuffer code */ I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) return; @@ -887,11 +887,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, * The mask bit in IMR is cleared by dev_priv->rps.work. */ - spin_lock(&dev_priv->rps.lock); + spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir; I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); POSTING_READ(GEN6_PMIMR); - spin_unlock(&dev_priv->rps.lock); + spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); } @@ -964,12 +964,12 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { if (pm_iir & GEN6_PM_RPS_EVENTS) { - spin_lock(&dev_priv->rps.lock); + spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); /* never want to mask useful interrupts. 
(also posting read) */ WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); - spin_unlock(&dev_priv->rps.lock); + spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 125a741eed86..a9be0d1c173d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3135,9 +3135,9 @@ static void gen6_disable_rps(struct drm_device *dev) * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); } @@ -3154,9 +3154,9 @@ static void valleyview_disable_rps(struct drm_device *dev) * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); @@ -3321,13 +3321,13 @@ static void gen6_enable_rps(struct drm_device *dev) /* requires MSI enabled */ I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS); - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); /* FIXME: Our interrupt enabling sequence is bonghits. * dev_priv->rps.pm_iir really should be 0 here. */ dev_priv->rps.pm_iir = 0; I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); /* unmask all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); @@ -3601,10 +3601,10 @@ static void valleyview_enable_rps(struct drm_device *dev) /* requires MSI enabled */ I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 54495df2403e..33a74a803008 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1055,14 +1055,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return false; - spin_lock_irqsave(&dev_priv->rps.lock, flags); + spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount.pm++ == 0) { u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~ring->irq_enable_mask); I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); POSTING_READ(GEN6_PMIMR); } - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; } @@ -1077,14 +1077,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return; - spin_lock_irqsave(&dev_priv->rps.lock, flags); + spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount.pm == 0) { u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~0); I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); POSTING_READ(GEN6_PMIMR); } - 
spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } static int diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 799f04c9da45..8a87b3f9974c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -80,7 +80,7 @@ struct intel_ring_buffer { struct { u32 gt; /* protected by dev_priv->irq_lock */ - u32 pm; /* protected by dev_priv->rps.lock (sucks) */ + u32 pm; /* protected by dev_priv->irq_lock */ } irq_refcount; u32 irq_enable_mask; /* bitmask to enable ring interrupt */ u32 trace_irq_seqno; -- cgit v1.2.3 From c7113cc35f59b46b301367b947c4f71ac8f0d5bb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:29 +0200 Subject: drm/i915: unify ring irq refcounts (again) With the simplified locking there's no reason any more to keep the refcounts seperate. v2: Readd the lost comment that ring->irq_refcount is protected by dev_priv->irq_lock. Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 20 ++++++++++---------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 +---- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 33a74a803008..23ffe1d06220 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -821,7 +821,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); @@ -839,7 +839,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { dev_priv->gt_irq_mask |= ring->irq_enable_mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); @@ -858,7 +858,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { dev_priv->irq_mask &= ~ring->irq_enable_mask; I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); @@ -876,7 +876,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { dev_priv->irq_mask |= ring->irq_enable_mask; I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); @@ -895,7 +895,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { dev_priv->irq_mask &= ~ring->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); @@ -913,7 +913,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { dev_priv->irq_mask |= ring->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); @@ -1006,7 +1006,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) gen6_gt_force_wake_get(dev_priv); spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if 
(ring->irq_refcount++ == 0) { if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | @@ -1030,7 +1030,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); @@ -1056,7 +1056,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.pm++ == 0) { + if (ring->irq_refcount++ == 0) { u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~ring->irq_enable_mask); I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); @@ -1078,7 +1078,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) return; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.pm == 0) { + if (--ring->irq_refcount == 0) { u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~0); I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8a87b3f9974c..6e38256d41e1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -78,10 +78,7 @@ struct intel_ring_buffer { */ u32 last_retired_head; - struct { - u32 gt; /* protected by dev_priv->irq_lock */ - u32 pm; /* protected by dev_priv->irq_lock */ - } irq_refcount; + unsigned irq_refcount; /* protected by dev_priv->irq_lock */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ u32 trace_irq_seqno; u32 sync_seqno[I915_NUM_RINGS-1]; -- cgit v1.2.3 From c0d6a3dd61d46a640ead0a9d38b78ca22d37a304 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:30 +0200 Subject: drm/i915: don't enable PM_VEBOX_CS_ERROR_INTERRUPT The code to handle it is broken - there's simply no code to clear CS parser errors on gen5+. And behold, for all the other rings we also don't enable it! Leave the handling code itself in place just to be consistent with the existing mess though. And in case someone feels like fixing it all up. This has been errornously enabled in commit 12638c57f31952127c734c26315e1348fa1334c2 Author: Ben Widawsky Date: Tue May 28 19:22:31 2013 -0700 drm/i915: Enable vebox interrupts Cc: Damien Lespiau Cc: Ben Widawsky Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 3 +-- drivers/gpu/drm/i915/intel_ringbuffer.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f4d5569834c2..cf1a21a9728a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2814,8 +2814,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); if (HAS_VEBOX(dev)) - pm_irqs |= PM_VEBOX_USER_INTERRUPT | - PM_VEBOX_CS_ERROR_INTERRUPT; + pm_irqs |= PM_VEBOX_USER_INTERRUPT; /* Our enable/disable rps functions may touch these registers so * make sure to set a known state for only the non-RPS bits. 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 23ffe1d06220..815e30332247 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2000,8 +2000,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT | - PM_VEBOX_CS_ERROR_INTERRUPT; + ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; ring->irq_get = hsw_vebox_get_irq; ring->irq_put = hsw_vebox_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; -- cgit v1.2.3 From 08e2a7de8ec86054a1272e4fc9d15fa6c18d3b16 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 11 Jul 2013 20:10:54 +0100 Subject: drm/i915: Use for_each_pipe() when possible Came across two open-coded versions of for_each_pipe(), might as well use the macro. Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d2b76b695349..bca81eecd31e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1131,7 +1131,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, } /* Need to check both planes against the pipe */ - for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + for_each_pipe(i) { reg = DSPCNTR(i); val = I915_READ(reg); cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> @@ -9715,7 +9715,7 @@ void intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev)->num_pipes, INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); - for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + for_each_pipe(i) { intel_crtc_init(dev, i); for (j = 0; j < dev_priv->num_plane; j++) { ret = intel_plane_init(dev, i, j); -- cgit v1.2.3 From 53b914084950e5766b40228c4e08706e28745fa5 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 12 Jul 2013 16:24:40 +0100 Subject: drm/i915: Don't attempt to read an uninitialized stack value If intel_sdvo_get_value() fails here, val is uninitialized and the cross check will compare the pipe config multiplier with a bogus value. Instead, only set encoder_pixel_multiplier when the sdvo command has been successful. The cross check will compare the pipe config value with 0 otherwise. v2: Do the cross check with the initial value of encoder_pixel_multiplier (0) if the sdvo command fails (and thus keep the warning) (Daniel Vetter) Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 8415d6a610dd..798df114cfd3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1357,17 +1357,19 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } /* Cross check the port pixel multiplier with the sdvo encoder state.
*/ - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1); - switch (val) { - case SDVO_CLOCK_RATE_MULT_1X: - encoder_pixel_multiplier = 1; - break; - case SDVO_CLOCK_RATE_MULT_2X: - encoder_pixel_multiplier = 2; - break; - case SDVO_CLOCK_RATE_MULT_4X: - encoder_pixel_multiplier = 4; - break; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, + &val, 1)) { + switch (val) { + case SDVO_CLOCK_RATE_MULT_1X: + encoder_pixel_multiplier = 1; + break; + case SDVO_CLOCK_RATE_MULT_2X: + encoder_pixel_multiplier = 2; + break; + case SDVO_CLOCK_RATE_MULT_4X: + encoder_pixel_multiplier = 4; + break; + } } WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, -- cgit v1.2.3 From 84734a049d0ef2f6f5fb0a1fe060cd51480dd855 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 12 Jul 2013 16:50:57 +0300 Subject: drm/i915: move error state to own compilation unit Move error state generation and stringification to it's own compilation unit. Sysfs also uses this so it can't be under CONFIG_DEBUG_FS This fixes a regression introduced in commit ef86ddced720fddc3835558447a7f594d3609c73 Author: Mika Kuoppala Date: Thu Jun 6 17:38:54 2013 +0300 drm/i915: add error_state sysfs entry Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=66814 Signed-off-by: Mika Kuoppala Reported-by: kbuild test robot Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 415 +-------------- drivers/gpu/drm/i915/i915_drv.h | 24 +- drivers/gpu/drm/i915/i915_gpu_error.c | 971 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_irq.c | 529 ------------------ drivers/gpu/drm/i915/intel_display.c | 4 - drivers/gpu/drm/i915/intel_overlay.c | 4 - 7 files changed, 983 insertions(+), 965 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_gpu_error.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 40034ecefd3b..9d1da7cceb21 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -5,6 +5,7 @@ ccflags-y := -Iinclude/drm i915-y := i915_drv.o i915_dma.o i915_irq.o \ i915_debugfs.o \ + i915_gpu_error.o \ i915_suspend.o \ i915_gem.o \ i915_gem_context.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d4138124d993..86379799dab8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include "intel_drv.h" #include "intel_ringbuffer.h" @@ -90,16 +89,6 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj) } } -static const char *cache_level_str(int type) -{ - switch (type) { - case I915_CACHE_NONE: return " uncached"; - case I915_CACHE_LLC: return " snooped (LLC)"; - case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; - default: return ""; - } -} - static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { @@ -113,7 +102,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->last_read_seqno, obj->last_write_seqno, obj->last_fenced_seqno, - cache_level_str(obj->cache_level), + i915_cache_level_str(obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); if (obj->base.name) @@ -608,358 +597,6 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } -static const char *ring_str(int ring) -{ - switch (ring) { - case RCS: return "render"; - case VCS: return "bsd"; - case BCS: return "blt"; - case VECS: return "vebox"; - default: return ""; - } -} - -static const char *pin_flag(int pinned) -{ - if (pinned > 0) - return " P"; - else if (pinned < 0) - return " p"; - else - return ""; -} - -static const char *tiling_flag(int tiling) -{ - switch (tiling) { - default: - case I915_TILING_NONE: return ""; - case I915_TILING_X: return " X"; - case I915_TILING_Y: return " Y"; - } -} - -static const char *dirty_flag(int dirty) -{ - return dirty ? " dirty" : ""; -} - -static const char *purgeable_flag(int purgeable) -{ - return purgeable ? " purgeable" : ""; -} - -static bool __i915_error_ok(struct drm_i915_error_state_buf *e) -{ - - if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { - e->err = -ENOSPC; - return false; - } - - if (e->bytes == e->size - 1 || e->err) - return false; - - return true; -} - -static bool __i915_error_seek(struct drm_i915_error_state_buf *e, - unsigned len) -{ - if (e->pos + len <= e->start) { - e->pos += len; - return false; - } - - /* First vsnprintf needs to fit in its entirety for memmove */ - if (len >= e->size) { - e->err = -EIO; - return false; - } - - return true; -} - -static void __i915_error_advance(struct drm_i915_error_state_buf *e, - unsigned len) -{ - /* If this is first printf in this window, adjust it so that - * start position matches start of the buffer - */ - - if (e->pos < e->start) { - const size_t off = e->start - e->pos; - - /* Should not happen but be paranoid */ - if (off > len || e->bytes) { - e->err = -EIO; - return; - } - - memmove(e->buf, e->buf + off, len - off); - e->bytes = len - off; - e->pos = e->start; - return; - } - - e->bytes += len; - e->pos += len; -} - -static void i915_error_vprintf(struct drm_i915_error_state_buf *e, - const char *f, va_list args) -{ - unsigned len; - - if (!__i915_error_ok(e)) - return; - - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - len = vsnprintf(NULL, 0, f, args); - if (!__i915_error_seek(e, len)) - return; - } - - len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; - - __i915_error_advance(e, len); -} - -static void i915_error_puts(struct drm_i915_error_state_buf *e, - const char *str) -{ - unsigned len; - - if (!__i915_error_ok(e)) - return; - - len = strlen(str); - - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - if (!__i915_error_seek(e, len)) - return; - } - - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; - memcpy(e->buf + e->bytes, str, len); - - __i915_error_advance(e, len); -} - -void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) -{ - va_list args; - - va_start(args, f); - i915_error_vprintf(e, f, args); - va_end(args); -} - -#define err_printf(e, ...) 
i915_error_printf(e, __VA_ARGS__) -#define err_puts(e, s) i915_error_puts(e, s) - -static void print_error_buffers(struct drm_i915_error_state_buf *m, - const char *name, - struct drm_i915_error_buffer *err, - int count) -{ - err_printf(m, "%s [%d]:\n", name, count); - - while (count--) { - err_printf(m, " %08x %8u %02x %02x %x %x", - err->gtt_offset, - err->size, - err->read_domains, - err->write_domain, - err->rseqno, err->wseqno); - err_puts(m, pin_flag(err->pinned)); - err_puts(m, tiling_flag(err->tiling)); - err_puts(m, dirty_flag(err->dirty)); - err_puts(m, purgeable_flag(err->purgeable)); - err_puts(m, err->ring != -1 ? " " : ""); - err_puts(m, ring_str(err->ring)); - err_puts(m, cache_level_str(err->cache_level)); - - if (err->name) - err_printf(m, " (name: %d)", err->name); - if (err->fence_reg != I915_FENCE_REG_NONE) - err_printf(m, " (fence: %d)", err->fence_reg); - - err_puts(m, "\n"); - err++; - } -} - -static void i915_ring_error_state(struct drm_i915_error_state_buf *m, - struct drm_device *dev, - struct drm_i915_error_state *error, - unsigned ring) -{ - BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ - err_printf(m, "%s command stream:\n", ring_str(ring)); - err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); - err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); - err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); - err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); - err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); - err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); - err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); - if (ring == RCS && INTEL_INFO(dev)->gen >= 4) - err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); - - if (INTEL_INFO(dev)->gen >= 4) - err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); - err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); - err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); - if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); - err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); - err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][0], - error->semaphore_seqno[ring][0]); - err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][1], - error->semaphore_seqno[ring][1]); - } - err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); - err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); - err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); - err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); -} - -int i915_error_state_to_str(struct drm_i915_error_state_buf *m, - const struct i915_error_state_file_priv *error_priv) -{ - struct drm_device *dev = error_priv->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_error_state *error = error_priv->error; - struct intel_ring_buffer *ring; - int i, j, page, offset, elt; - - if (!error) { - err_printf(m, "no error state collected\n"); - goto out; - } - - err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, - error->time.tv_usec); - err_printf(m, "Kernel: " UTS_RELEASE "\n"); - err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); - err_printf(m, "EIR: 0x%08x\n", error->eir); - err_printf(m, "IER: 0x%08x\n", error->ier); - err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); - err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); - err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); - err_printf(m, "CCID: 0x%08x\n", error->ccid); - - for (i = 0; i < 
dev_priv->num_fence_regs; i++) - err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - - for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) - err_printf(m, " INSTDONE_%d: 0x%08x\n", i, - error->extra_instdone[i]); - - if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, "ERROR: 0x%08x\n", error->error); - err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); - } - - if (INTEL_INFO(dev)->gen == 7) - err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - - for_each_ring(ring, dev_priv, i) - i915_ring_error_state(m, dev, error, i); - - if (error->active_bo) - print_error_buffers(m, "Active", - error->active_bo, - error->active_bo_count); - - if (error->pinned_bo) - print_error_buffers(m, "Pinned", - error->pinned_bo, - error->pinned_bo_count); - - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - struct drm_i915_error_object *obj; - - if ((obj = error->ring[i].batchbuffer)) { - err_printf(m, "%s --- gtt_offset = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", offset, - obj->pages[page][elt]); - offset += 4; - } - } - } - - if (error->ring[i].num_requests) { - err_printf(m, "%s --- %d requests\n", - dev_priv->ring[i].name, - error->ring[i].num_requests); - for (j = 0; j < error->ring[i].num_requests; j++) { - err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", - error->ring[i].requests[j].seqno, - error->ring[i].requests[j].jiffies, - error->ring[i].requests[j].tail); - } - } - - if ((obj = error->ring[i].ringbuffer)) { - err_printf(m, "%s --- ringbuffer = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", - offset, - obj->pages[page][elt]); - offset += 4; - } - } - } - - obj = error->ring[i].ctx; - if (obj) { - err_printf(m, "%s --- HW Context = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { - err_printf(m, "[%04x] %08x %08x %08x %08x\n", - offset, - obj->pages[0][elt], - obj->pages[0][elt+1], - obj->pages[0][elt+2], - obj->pages[0][elt+3]); - offset += 16; - } - } - } - - if (error->overlay) - intel_overlay_print_error_state(m, error->overlay); - - if (error->display) - intel_display_print_error_state(m, dev, error->display); - -out: - if (m->bytes == 0 && m->err) - return m->err; - - return 0; -} - static ssize_t i915_error_state_write(struct file *filp, const char __user *ubuf, @@ -982,26 +619,6 @@ i915_error_state_write(struct file *filp, return cnt; } -void i915_error_state_get(struct drm_device *dev, - struct i915_error_state_file_priv *error_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error_priv->error = dev_priv->gpu_error.first_error; - if (error_priv->error) - kref_get(&error_priv->error->ref); - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - -} - -void i915_error_state_put(struct i915_error_state_file_priv *error_priv) -{ - if (error_priv->error) - kref_put(&error_priv->error->ref, i915_error_state_free); -} - static int i915_error_state_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; @@ -1030,36 +647,6 @@ static int i915_error_state_release(struct inode *inode, struct file *file) return 0; } -int i915_error_state_buf_init(struct 
drm_i915_error_state_buf *ebuf, - size_t count, loff_t pos) -{ - memset(ebuf, 0, sizeof(*ebuf)); - - /* We need to have enough room to store any i915_error_state printf - * so that we can move it to start position. - */ - ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); - - if (ebuf->buf == NULL) { - ebuf->size = PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); - } - - if (ebuf->buf == NULL) { - ebuf->size = 128; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); - } - - if (ebuf->buf == NULL) - return -ENOMEM; - - ebuf->start = pos; - - return 0; -} - static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, size_t count, loff_t *pos) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 846500a8586c..65d54edae176 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1627,21 +1627,12 @@ extern void intel_hpd_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); extern void intel_gt_reset(struct drm_device *dev); -void i915_error_state_free(struct kref *error_ref); - void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); -#ifdef CONFIG_DEBUG_FS -extern void i915_destroy_error_state(struct drm_device *dev); -#else -#define i915_destroy_error_state(x) -#endif - - /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1950,13 +1941,12 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); void i915_debugfs_cleanup(struct drm_minor *minor); + +/* i915_gpu_error.c */ __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, const struct i915_error_state_file_priv *error); -void i915_error_state_get(struct drm_device *dev, - struct i915_error_state_file_priv *error_priv); -void i915_error_state_put(struct i915_error_state_file_priv *error_priv); int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, size_t count, loff_t pos); static inline void i915_error_state_buf_release( @@ -1964,6 +1954,14 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } +void i915_capture_error_state(struct drm_device *dev); +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv); +void i915_error_state_put(struct i915_error_state_file_priv *error_priv); +void i915_destroy_error_state(struct drm_device *dev); + +void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +const char *i915_cache_level_str(int type); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); @@ -2043,7 +2041,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* overlay */ -#ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); @@ -2052,7 +2049,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state 
*error); -#endif /* On SNB platform, before reading ring registers forcewake bit * must be set to prevent GT core from power down and stale values being diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c new file mode 100644 index 000000000000..58386cebb865 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -0,0 +1,971 @@ +/* + * Copyright (c) 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Keith Packard + * Mika Kuoppala + * + */ + +#include +#include "i915_drv.h" + +static const char *yesno(int v) +{ + return v ? "yes" : "no"; +} + +static const char *ring_str(int ring) +{ + switch (ring) { + case RCS: return "render"; + case VCS: return "bsd"; + case BCS: return "blt"; + case VECS: return "vebox"; + default: return ""; + } +} + +static const char *pin_flag(int pinned) +{ + if (pinned > 0) + return " P"; + else if (pinned < 0) + return " p"; + else + return ""; +} + +static const char *tiling_flag(int tiling) +{ + switch (tiling) { + default: + case I915_TILING_NONE: return ""; + case I915_TILING_X: return " X"; + case I915_TILING_Y: return " Y"; + } +} + +static const char *dirty_flag(int dirty) +{ + return dirty ? " dirty" : ""; +} + +static const char *purgeable_flag(int purgeable) +{ + return purgeable ? 
" purgeable" : ""; +} + +static bool __i915_error_ok(struct drm_i915_error_state_buf *e) +{ + + if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { + e->err = -ENOSPC; + return false; + } + + if (e->bytes == e->size - 1 || e->err) + return false; + + return true; +} + +static bool __i915_error_seek(struct drm_i915_error_state_buf *e, + unsigned len) +{ + if (e->pos + len <= e->start) { + e->pos += len; + return false; + } + + /* First vsnprintf needs to fit in its entirety for memmove */ + if (len >= e->size) { + e->err = -EIO; + return false; + } + + return true; +} + +static void __i915_error_advance(struct drm_i915_error_state_buf *e, + unsigned len) +{ + /* If this is first printf in this window, adjust it so that + * start position matches start of the buffer + */ + + if (e->pos < e->start) { + const size_t off = e->start - e->pos; + + /* Should not happen but be paranoid */ + if (off > len || e->bytes) { + e->err = -EIO; + return; + } + + memmove(e->buf, e->buf + off, len - off); + e->bytes = len - off; + e->pos = e->start; + return; + } + + e->bytes += len; + e->pos += len; +} + +static void i915_error_vprintf(struct drm_i915_error_state_buf *e, + const char *f, va_list args) +{ + unsigned len; + + if (!__i915_error_ok(e)) + return; + + /* Seek the first printf which is hits start position */ + if (e->pos < e->start) { + len = vsnprintf(NULL, 0, f, args); + if (!__i915_error_seek(e, len)) + return; + } + + len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); + if (len >= e->size - e->bytes) + len = e->size - e->bytes - 1; + + __i915_error_advance(e, len); +} + +static void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str) +{ + unsigned len; + + if (!__i915_error_ok(e)) + return; + + len = strlen(str); + + /* Seek the first printf which is hits start position */ + if (e->pos < e->start) { + if (!__i915_error_seek(e, len)) + return; + } + + if (len >= e->size - e->bytes) + len = e->size - e->bytes - 1; + memcpy(e->buf + e->bytes, str, len); + + __i915_error_advance(e, len); +} + +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) +#define err_puts(e, s) i915_error_puts(e, s) + +static void print_error_buffers(struct drm_i915_error_state_buf *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + err_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + err_printf(m, " %08x %8u %02x %02x %x %x", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->rseqno, err->wseqno); + err_puts(m, pin_flag(err->pinned)); + err_puts(m, tiling_flag(err->tiling)); + err_puts(m, dirty_flag(err->dirty)); + err_puts(m, purgeable_flag(err->purgeable)); + err_puts(m, err->ring != -1 ? 
" " : ""); + err_puts(m, ring_str(err->ring)); + err_puts(m, i915_cache_level_str(err->cache_level)); + + if (err->name) + err_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + err_printf(m, " (fence: %d)", err->fence_reg); + + err_puts(m, "\n"); + err++; + } +} + +static void i915_ring_error_state(struct drm_i915_error_state_buf *m, + struct drm_device *dev, + struct drm_i915_error_state *error, + unsigned ring) +{ + BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ + err_printf(m, "%s command stream:\n", ring_str(ring)); + err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); + err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); + err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); + err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); + err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); + err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); + err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); + if (ring == RCS && INTEL_INFO(dev)->gen >= 4) + err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); + + if (INTEL_INFO(dev)->gen >= 4) + err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); + err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); + err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); + if (INTEL_INFO(dev)->gen >= 6) { + err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); + err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); + err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][0], + error->semaphore_seqno[ring][0]); + err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][1], + error->semaphore_seqno[ring][1]); + } + err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); + err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); + err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); + err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); +} + +void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 
+{ + va_list args; + + va_start(args, f); + i915_error_vprintf(e, f, args); + va_end(args); +} + +int i915_error_state_to_str(struct drm_i915_error_state_buf *m, + const struct i915_error_state_file_priv *error_priv) +{ + struct drm_device *dev = error_priv->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error = error_priv->error; + struct intel_ring_buffer *ring; + int i, j, page, offset, elt; + + if (!error) { + err_printf(m, "no error state collected\n"); + goto out; + } + + err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); + err_printf(m, "Kernel: " UTS_RELEASE "\n"); + err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + err_printf(m, "EIR: 0x%08x\n", error->eir); + err_printf(m, "IER: 0x%08x\n", error->ier); + err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); + err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); + err_printf(m, "CCID: 0x%08x\n", error->ccid); + + for (i = 0; i < dev_priv->num_fence_regs; i++) + err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + + for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) + err_printf(m, " INSTDONE_%d: 0x%08x\n", i, + error->extra_instdone[i]); + + if (INTEL_INFO(dev)->gen >= 6) { + err_printf(m, "ERROR: 0x%08x\n", error->error); + err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); + } + + if (INTEL_INFO(dev)->gen == 7) + err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + + for_each_ring(ring, dev_priv, i) + i915_ring_error_state(m, dev, error, i); + + if (error->active_bo) + print_error_buffers(m, "Active", + error->active_bo, + error->active_bo_count); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo, + error->pinned_bo_count); + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + struct drm_i915_error_object *obj; + + if ((obj = error->ring[i].batchbuffer)) { + err_printf(m, "%s --- gtt_offset = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + err_printf(m, "%08x : %08x\n", offset, + obj->pages[page][elt]); + offset += 4; + } + } + } + + if (error->ring[i].num_requests) { + err_printf(m, "%s --- %d requests\n", + dev_priv->ring[i].name, + error->ring[i].num_requests); + for (j = 0; j < error->ring[i].num_requests; j++) { + err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", + error->ring[i].requests[j].seqno, + error->ring[i].requests[j].jiffies, + error->ring[i].requests[j].tail); + } + } + + if ((obj = error->ring[i].ringbuffer)) { + err_printf(m, "%s --- ringbuffer = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + err_printf(m, "%08x : %08x\n", + offset, + obj->pages[page][elt]); + offset += 4; + } + } + } + + obj = error->ring[i].ctx; + if (obj) { + err_printf(m, "%s --- HW Context = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + offset, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + offset += 16; + } + } + } + + if (error->overlay) + intel_overlay_print_error_state(m, error->overlay); + + if (error->display) + intel_display_print_error_state(m, dev, error->display); + +out: + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + 
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count, loff_t pos) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + /* We need to have enough room to store any i915_error_state printf + * so that we can move it to start position. + */ + ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, + GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); + + if (ebuf->buf == NULL) { + ebuf->size = PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + } + + if (ebuf->buf == NULL) { + ebuf->size = 128; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + } + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->start = pos; + + return 0; +} + +static void i915_error_object_free(struct drm_i915_error_object *obj) +{ + int page; + + if (obj == NULL) + return; + + for (page = 0; page < obj->page_count; page++) + kfree(obj->pages[page]); + + kfree(obj); +} + +static void i915_error_state_free(struct kref *error_ref) +{ + struct drm_i915_error_state *error = container_of(error_ref, + typeof(*error), ref); + int i; + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + i915_error_object_free(error->ring[i].batchbuffer); + i915_error_object_free(error->ring[i].ringbuffer); + i915_error_object_free(error->ring[i].ctx); + kfree(error->ring[i].requests); + } + + kfree(error->active_bo); + kfree(error->overlay); + kfree(error->display); + kfree(error); +} + +static struct drm_i915_error_object * +i915_error_object_create_sized(struct drm_i915_private *dev_priv, + struct drm_i915_gem_object *src, + const int num_pages) +{ + struct drm_i915_error_object *dst; + int i; + u32 reloc_offset; + + if (src == NULL || src->pages == NULL) + return NULL; + + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); + if (dst == NULL) + return NULL; + + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); + for (i = 0; i < num_pages; i++) { + unsigned long flags; + void *d; + + d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (d == NULL) + goto unwind; + + local_irq_save(flags); + if (reloc_offset < dev_priv->gtt.mappable_end && + src->has_global_gtt_mapping) { + void __iomem *s; + + /* Simply ignore tiling or any overlapping fence. + * It's part of the error state, and this hopefully + * captures what the GPU read. 
+ */ + + s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc_offset); + memcpy_fromio(d, s, PAGE_SIZE); + io_mapping_unmap_atomic(s); + } else if (src->stolen) { + unsigned long offset; + + offset = dev_priv->mm.stolen_base; + offset += src->stolen->start; + offset += i << PAGE_SHIFT; + + memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); + } else { + struct page *page; + void *s; + + page = i915_gem_object_get_page(src, i); + + drm_clflush_pages(&page, 1); + + s = kmap_atomic(page); + memcpy(d, s, PAGE_SIZE); + kunmap_atomic(s); + + drm_clflush_pages(&page, 1); + } + local_irq_restore(flags); + + dst->pages[i] = d; + + reloc_offset += PAGE_SIZE; + } + dst->page_count = num_pages; + + return dst; + +unwind: + while (i--) + kfree(dst->pages[i]); + kfree(dst); + return NULL; +} +#define i915_error_object_create(dev_priv, src) \ + i915_error_object_create_sized((dev_priv), (src), \ + (src)->base.size>>PAGE_SHIFT) + +static void capture_bo(struct drm_i915_error_buffer *err, + struct drm_i915_gem_object *obj) +{ + err->size = obj->base.size; + err->name = obj->base.name; + err->rseqno = obj->last_read_seqno; + err->wseqno = obj->last_write_seqno; + err->gtt_offset = i915_gem_obj_ggtt_offset(obj); + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? obj->ring->id : -1; + err->cache_level = obj->cache_level; +} + +static u32 capture_active_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, global_list) { + if (obj->pin_count == 0) + continue; + + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < dev_priv->num_fence_regs; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + default: + BUG(); + } +} + +static struct drm_i915_error_object * +i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + u32 seqno; + + if (!ring->get_seqno) + return NULL; + + if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { + u32 acthd = I915_READ(ACTHD); + + if (WARN_ON(ring->id != RCS)) + return NULL; + + obj = ring->private; + if (acthd >= i915_gem_obj_ggtt_offset(obj) && + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) + return 
i915_error_object_create(dev_priv, obj); + } + + seqno = ring->get_seqno(ring, false); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->ring != ring) + continue; + + if (i915_seqno_passed(seqno, obj->last_read_seqno)) + continue; + + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) + continue; + + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userspace. + */ + return i915_error_object_create(dev_priv, obj); + } + + return NULL; +} + +static void i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 6) { + error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); + error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); + error->semaphore_mboxes[ring->id][0] + = I915_READ(RING_SYNC_0(ring->mmio_base)); + error->semaphore_mboxes[ring->id][1] + = I915_READ(RING_SYNC_1(ring->mmio_base)); + error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; + error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; + } + + if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); + error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); + error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); + error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); + error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); + if (ring->id == RCS) + error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); + error->ipeir[ring->id] = I915_READ(IPEIR); + error->ipehr[ring->id] = I915_READ(IPEHR); + error->instdone[ring->id] = I915_READ(INSTDONE); + } + + error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); + error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); + error->seqno[ring->id] = ring->get_seqno(ring, false); + error->acthd[ring->id] = intel_ring_get_active_head(ring); + error->head[ring->id] = I915_READ_HEAD(ring); + error->tail[ring->id] = I915_READ_TAIL(ring); + error->ctl[ring->id] = I915_READ_CTL(ring); + + error->cpu_ring_head[ring->id] = ring->head; + error->cpu_ring_tail[ring->id] = ring->tail; +} + + +static void i915_gem_record_active_context(struct intel_ring_buffer *ring, + struct drm_i915_error_state *error, + struct drm_i915_error_ring *ering) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj; + + /* Currently render ring is the only HW context user */ + if (ring->id != RCS || !error->ccid) + return; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { + ering->ctx = i915_error_object_create_sized(dev_priv, + obj, 1); + break; + } + } +} + +static void i915_gem_record_rings(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct drm_i915_gem_request *request; + int i, count; + + for_each_ring(ring, dev_priv, i) { + i915_record_ring_state(dev, error, ring); + + error->ring[i].batchbuffer = + i915_error_first_batchbuffer(dev_priv, ring); + + error->ring[i].ringbuffer = + i915_error_object_create(dev_priv, ring->obj); + + + i915_gem_record_active_context(ring, error, &error->ring[i]); + + count = 0; + list_for_each_entry(request, &ring->request_list, 
list) + count++; + + error->ring[i].num_requests = count; + error->ring[i].requests = + kmalloc(count*sizeof(struct drm_i915_error_request), + GFP_ATOMIC); + if (error->ring[i].requests == NULL) { + error->ring[i].num_requests = 0; + continue; + } + + count = 0; + list_for_each_entry(request, &ring->request_list, list) { + struct drm_i915_error_request *erq; + + erq = &error->ring[i].requests[count++]; + erq->seqno = request->seqno; + erq->jiffies = request->emitted_jiffies; + erq->tail = request->tail; + } + } +} + +static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct drm_i915_gem_object *obj; + int i; + + i = 0; + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) + i++; + error->active_bo_count = i; + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + if (obj->pin_count) + i++; + error->pinned_bo_count = i - error->active_bo_count; + + if (i) { + error->active_bo = kmalloc(sizeof(*error->active_bo)*i, + GFP_ATOMIC); + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; + } + + if (error->active_bo) + error->active_bo_count = + capture_active_bo(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_pinned_bo(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.bound_list); +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. + */ +void i915_capture_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + int pipe; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + if (error) + return; + + /* Account for pipe specific data like PIPE*STAT */ + error = kzalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + return; + } + + DRM_INFO("capturing error event; look for more information in " + "/sys/class/drm/card%d/error\n", dev->primary->index); + + kref_init(&error->ref); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + if (HAS_HW_CONTEXTS(dev)) + error->ccid = I915_READ(CCID); + + if (HAS_PCH_SPLIT(dev)) + error->ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(dev)) + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + else if (IS_GEN2(dev)) + error->ier = I915_READ16(IER); + else + error->ier = I915_READ(IER); + + if (INTEL_INFO(dev)->gen >= 6) + error->derrmr = I915_READ(DERRMR); + + if (IS_VALLEYVIEW(dev)) + error->forcewake = I915_READ(FORCEWAKE_VLV); + else if (INTEL_INFO(dev)->gen >= 7) + error->forcewake = I915_READ(FORCEWAKE_MT); + else if (INTEL_INFO(dev)->gen == 6) + error->forcewake = I915_READ(FORCEWAKE); + + if (!HAS_PCH_SPLIT(dev)) + for_each_pipe(pipe) + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); + + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + error->done_reg = I915_READ(DONE_REG); + } + + if (INTEL_INFO(dev)->gen == 7) + error->err_int = I915_READ(GEN7_ERR_INT); + + i915_get_extra_instdone(dev, 
error->extra_instdone); + + i915_gem_capture_buffers(dev_priv, error); + i915_gem_record_fences(dev, error); + i915_gem_record_rings(dev, error); + + do_gettimeofday(&error->time); + + error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + if (dev_priv->gpu_error.first_error == NULL) { + dev_priv->gpu_error.first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + i915_error_state_free(&error->ref); +} + +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error_priv->error = dev_priv->gpu_error.first_error; + if (error_priv->error) + kref_get(&error_priv->error->ref); + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + +} + +void i915_error_state_put(struct i915_error_state_file_priv *error_priv) +{ + if (error_priv->error) + kref_put(&error_priv->error->ref, i915_error_state_free); +} + +void i915_destroy_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + dev_priv->gpu_error.first_error = NULL; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + kref_put(&error->ref, i915_error_state_free); +} + +const char *i915_cache_level_str(int type) +{ + switch (type) { + case I915_CACHE_NONE: return " uncached"; + case I915_CACHE_LLC: return " snooped (LLC)"; + case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; + default: return ""; + } +} + +/* NB: please notice the memset */ +void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + + switch (INTEL_INFO(dev)->gen) { + case 2: + case 3: + instdone[0] = I915_READ(INSTDONE); + break; + case 4: + case 5: + case 6: + instdone[0] = I915_READ(INSTDONE_I965); + instdone[1] = I915_READ(INSTDONE1); + break; + default: + WARN_ONCE(1, "Unsupported platform\n"); + case 7: + instdone[0] = I915_READ(GEN7_INSTDONE_1); + instdone[1] = I915_READ(GEN7_SC_INSTDONE); + instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); + instdone[3] = I915_READ(GEN7_ROW_INSTDONE); + break; + } +} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cf1a21a9728a..64db680fb7a4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1493,535 +1493,6 @@ static void i915_error_work_func(struct work_struct *work) } } -/* NB: please notice the memset */ -static void i915_get_extra_instdone(struct drm_device *dev, - uint32_t *instdone) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - - switch(INTEL_INFO(dev)->gen) { - case 2: - case 3: - instdone[0] = I915_READ(INSTDONE); - break; - case 4: - case 5: - case 6: - instdone[0] = I915_READ(INSTDONE_I965); - instdone[1] = I915_READ(INSTDONE1); - break; - default: - WARN_ONCE(1, "Unsupported platform\n"); - case 7: - instdone[0] = I915_READ(GEN7_INSTDONE_1); - instdone[1] = I915_READ(GEN7_SC_INSTDONE); - instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone[3] = 
I915_READ(GEN7_ROW_INSTDONE); - break; - } -} - -#ifdef CONFIG_DEBUG_FS -static struct drm_i915_error_object * -i915_error_object_create_sized(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *src, - const int num_pages) -{ - struct drm_i915_error_object *dst; - int i; - u32 reloc_offset; - - if (src == NULL || src->pages == NULL) - return NULL; - - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); - if (dst == NULL) - return NULL; - - reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); - for (i = 0; i < num_pages; i++) { - unsigned long flags; - void *d; - - d = kmalloc(PAGE_SIZE, GFP_ATOMIC); - if (d == NULL) - goto unwind; - - local_irq_save(flags); - if (reloc_offset < dev_priv->gtt.mappable_end && - src->has_global_gtt_mapping) { - void __iomem *s; - - /* Simply ignore tiling or any overlapping fence. - * It's part of the error state, and this hopefully - * captures what the GPU read. - */ - - s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc_offset); - memcpy_fromio(d, s, PAGE_SIZE); - io_mapping_unmap_atomic(s); - } else if (src->stolen) { - unsigned long offset; - - offset = dev_priv->mm.stolen_base; - offset += src->stolen->start; - offset += i << PAGE_SHIFT; - - memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); - } else { - struct page *page; - void *s; - - page = i915_gem_object_get_page(src, i); - - drm_clflush_pages(&page, 1); - - s = kmap_atomic(page); - memcpy(d, s, PAGE_SIZE); - kunmap_atomic(s); - - drm_clflush_pages(&page, 1); - } - local_irq_restore(flags); - - dst->pages[i] = d; - - reloc_offset += PAGE_SIZE; - } - dst->page_count = num_pages; - - return dst; - -unwind: - while (i--) - kfree(dst->pages[i]); - kfree(dst); - return NULL; -} -#define i915_error_object_create(dev_priv, src) \ - i915_error_object_create_sized((dev_priv), (src), \ - (src)->base.size>>PAGE_SHIFT) - -static void -i915_error_object_free(struct drm_i915_error_object *obj) -{ - int page; - - if (obj == NULL) - return; - - for (page = 0; page < obj->page_count; page++) - kfree(obj->pages[page]); - - kfree(obj); -} - -void -i915_error_state_free(struct kref *error_ref) -{ - struct drm_i915_error_state *error = container_of(error_ref, - typeof(*error), ref); - int i; - - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - i915_error_object_free(error->ring[i].batchbuffer); - i915_error_object_free(error->ring[i].ringbuffer); - i915_error_object_free(error->ring[i].ctx); - kfree(error->ring[i].requests); - } - - kfree(error->active_bo); - kfree(error->overlay); - kfree(error->display); - kfree(error); -} -static void capture_bo(struct drm_i915_error_buffer *err, - struct drm_i915_gem_object *obj) -{ - err->size = obj->base.size; - err->name = obj->base.name; - err->rseqno = obj->last_read_seqno; - err->wseqno = obj->last_write_seqno; - err->gtt_offset = i915_gem_obj_ggtt_offset(obj); - err->read_domains = obj->base.read_domains; - err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (obj->pin_count > 0) - err->pinned = 1; - if (obj->user_pin_count > 0) - err->pinned = -1; - err->tiling = obj->tiling_mode; - err->dirty = obj->dirty; - err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring ? 
obj->ring->id : -1; - err->cache_level = obj->cache_level; -} - -static u32 capture_active_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, mm_list) { - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, global_list) { - if (obj->pin_count == 0) - continue; - - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static void i915_gem_record_fences(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - - default: - BUG(); - } -} - -static struct drm_i915_error_object * -i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) -{ - struct drm_i915_gem_object *obj; - u32 seqno; - - if (!ring->get_seqno) - return NULL; - - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { - u32 acthd = I915_READ(ACTHD); - - if (WARN_ON(ring->id != RCS)) - return NULL; - - obj = ring->private; - if (acthd >= i915_gem_obj_ggtt_offset(obj) && - acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) - return i915_error_object_create(dev_priv, obj); - } - - seqno = ring->get_seqno(ring, false); - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (obj->ring != ring) - continue; - - if (i915_seqno_passed(seqno, obj->last_read_seqno)) - continue; - - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) - continue; - - /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userspace. 
- */ - return i915_error_object_create(dev_priv, obj); - } - - return NULL; -} - -static void i915_record_ring_state(struct drm_device *dev, - struct drm_i915_error_state *error, - struct intel_ring_buffer *ring) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { - error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); - error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); - error->semaphore_mboxes[ring->id][0] - = I915_READ(RING_SYNC_0(ring->mmio_base)); - error->semaphore_mboxes[ring->id][1] - = I915_READ(RING_SYNC_1(ring->mmio_base)); - error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; - error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; - } - - if (INTEL_INFO(dev)->gen >= 4) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); - error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); - error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); - error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); - error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - if (ring->id == RCS) - error->bbaddr = I915_READ64(BB_ADDR); - } else { - error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); - error->ipeir[ring->id] = I915_READ(IPEIR); - error->ipehr[ring->id] = I915_READ(IPEHR); - error->instdone[ring->id] = I915_READ(INSTDONE); - } - - error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); - error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring, false); - error->acthd[ring->id] = intel_ring_get_active_head(ring); - error->head[ring->id] = I915_READ_HEAD(ring); - error->tail[ring->id] = I915_READ_TAIL(ring); - error->ctl[ring->id] = I915_READ_CTL(ring); - - error->cpu_ring_head[ring->id] = ring->head; - error->cpu_ring_tail[ring->id] = ring->tail; -} - - -static void i915_gem_record_active_context(struct intel_ring_buffer *ring, - struct drm_i915_error_state *error, - struct drm_i915_error_ring *ering) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct drm_i915_gem_object *obj; - - /* Currently render ring is the only HW context user */ - if (ring->id != RCS || !error->ccid) - return; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { - ering->ctx = i915_error_object_create_sized(dev_priv, - obj, 1); - break; - } - } -} - -static void i915_gem_record_rings(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - struct drm_i915_gem_request *request; - int i, count; - - for_each_ring(ring, dev_priv, i) { - i915_record_ring_state(dev, error, ring); - - error->ring[i].batchbuffer = - i915_error_first_batchbuffer(dev_priv, ring); - - error->ring[i].ringbuffer = - i915_error_object_create(dev_priv, ring->obj); - - - i915_gem_record_active_context(ring, error, &error->ring[i]); - - count = 0; - list_for_each_entry(request, &ring->request_list, list) - count++; - - error->ring[i].num_requests = count; - error->ring[i].requests = - kmalloc(count*sizeof(struct drm_i915_error_request), - GFP_ATOMIC); - if (error->ring[i].requests == NULL) { - error->ring[i].num_requests = 0; - continue; - } - - count = 0; - list_for_each_entry(request, &ring->request_list, list) { - struct drm_i915_error_request *erq; - - erq = &error->ring[i].requests[count++]; - erq->seqno = request->seqno; - erq->jiffies = 
request->emitted_jiffies; - erq->tail = request->tail; - } - } -} - -static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error) -{ - struct drm_i915_gem_object *obj; - int i; - - i = 0; - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) - i++; - error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (obj->pin_count) - i++; - error->pinned_bo_count = i - error->active_bo_count; - - if (i) { - error->active_bo = kmalloc(sizeof(*error->active_bo)*i, - GFP_ATOMIC); - if (error->active_bo) - error->pinned_bo = - error->active_bo + error->active_bo_count; - } - - if (error->active_bo) - error->active_bo_count = - capture_active_bo(error->active_bo, - error->active_bo_count, - &dev_priv->mm.active_list); - - if (error->pinned_bo) - error->pinned_bo_count = - capture_pinned_bo(error->pinned_bo, - error->pinned_bo_count, - &dev_priv->mm.bound_list); -} - -/** - * i915_capture_error_state - capture an error record for later analysis - * @dev: drm device - * - * Should be called when an error is detected (either a hang or an error - * interrupt) to capture error state from the time of the error. Fills - * out a structure which becomes available in debugfs for user level tools - * to pick up. - */ -static void i915_capture_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_error_state *error; - unsigned long flags; - int pipe; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - if (error) - return; - - /* Account for pipe specific data like PIPE*STAT */ - error = kzalloc(sizeof(*error), GFP_ATOMIC); - if (!error) { - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); - return; - } - - DRM_INFO("capturing error event; look for more information in " - "/sys/class/drm/card%d/error\n", dev->primary->index); - - kref_init(&error->ref); - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); - if (HAS_HW_CONTEXTS(dev)) - error->ccid = I915_READ(CCID); - - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else if (IS_VALLEYVIEW(dev)) - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); - else if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); - - if (INTEL_INFO(dev)->gen >= 6) - error->derrmr = I915_READ(DERRMR); - - if (IS_VALLEYVIEW(dev)) - error->forcewake = I915_READ(FORCEWAKE_VLV); - else if (INTEL_INFO(dev)->gen >= 7) - error->forcewake = I915_READ(FORCEWAKE_MT); - else if (INTEL_INFO(dev)->gen == 6) - error->forcewake = I915_READ(FORCEWAKE); - - if (!HAS_PCH_SPLIT(dev)) - for_each_pipe(pipe) - error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); - - if (INTEL_INFO(dev)->gen >= 6) { - error->error = I915_READ(ERROR_GEN6); - error->done_reg = I915_READ(DONE_REG); - } - - if (INTEL_INFO(dev)->gen == 7) - error->err_int = I915_READ(GEN7_ERR_INT); - - i915_get_extra_instdone(dev, error->extra_instdone); - - i915_gem_capture_buffers(dev_priv, error); - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); - - do_gettimeofday(&error->time); - - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - if (dev_priv->gpu_error.first_error == NULL) { - dev_priv->gpu_error.first_error = error; - error = NULL; - } 
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - i915_error_state_free(&error->ref); -} - -void i915_destroy_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_error_state *error; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - dev_priv->gpu_error.first_error = NULL; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - kref_put(&error->ref, i915_error_state_free); -} -#else -#define i915_capture_error_state(x) -#endif - static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bca81eecd31e..b1b1d2a38fc9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10176,9 +10176,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) return 0; } -#ifdef CONFIG_DEBUG_FS -#include - struct intel_display_error_state { u32 power_well_driver; @@ -10322,4 +10319,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " BASE: %08x\n", error->cursor[i].base); } } -#endif diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 81c3ca14fa92..2abb53e6f1e0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1412,9 +1412,6 @@ void intel_cleanup_overlay(struct drm_device *dev) kfree(dev_priv->overlay); } -#ifdef CONFIG_DEBUG_FS -#include - struct intel_overlay_error_state { struct overlay_registers regs; unsigned long base; @@ -1537,4 +1534,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m, P(UVSCALEV); #undef P } -#endif -- cgit v1.2.3 From 426115cf5dd81d17a6322c493ca337e637ce2aed Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Jul 2013 22:13:42 +0200 Subject: drm/i915: clean up vlv ->pre_pll_enable and pll enable sequence No need to call the ->pre_pll_enable hook twice if we don't enable the dpll too early. This should make Jani a bit less grumpy. v2: Rebase on top of the newly-colored BUG_ONs. v3: Reinstate the lost write of the DPLL_MD register, spotted by Imre. 
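Editor's note: for readers skimming the diff below, the net effect on the VLV modeset path is easiest to see as the resulting call order in valleyview_crtc_enable(). This is a condensed sketch reconstructed from the hunks (everything else in the function is omitted):

    /* encoders get their ->pre_pll_enable hook exactly once, before the PLL */
    for_each_encoder_on_crtc(dev, crtc, encoder)
            if (encoder->pre_pll_enable)
                    encoder->pre_pll_enable(encoder);

    /* programs the DPLL, waits for DPLL_LOCK_VLV, then writes DPLL_MD */
    vlv_enable_pll(intel_crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
            if (encoder->pre_enable)
                    encoder->pre_enable(encoder);

vlv_update_pll() now only computes and stashes dpll_hw_state; the duplicate hook walk and the early DPLL/DPLL_MD writes it used to do are removed by the hunks below.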
Cc: Imre Deak Cc: Jani Nikula Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 47 +++++++++++++++--------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b1b1d2a38fc9..abaae78f0c52 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1321,32 +1321,40 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); } -static void vlv_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void vlv_enable_pll(struct intel_crtc *crtc) { - int reg; - u32 val; + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int reg = DPLL(crtc->pipe); + u32 dpll = crtc->config.dpll_hw_state.dpll; - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, crtc->pipe); /* No really, not for ILK+ */ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) - assert_panel_unlocked(dev_priv, pipe); + assert_panel_unlocked(dev_priv, crtc->pipe); - reg = DPLL(pipe); - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, dpll); + POSTING_READ(reg); + udelay(150); + + if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); + + I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md); + POSTING_READ(DPLL_MD(crtc->pipe)); /* We do this three times for luck */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ } @@ -3654,7 +3662,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); - vlv_enable_pll(dev_priv, pipe); + vlv_enable_pll(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -4409,7 +4417,6 @@ static void vlv_update_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; int pipe = crtc->pipe; u32 dpll, mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; @@ -4498,10 +4505,6 @@ static void vlv_update_pll(struct intel_crtc *crtc) vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - /* Enable DPIO clock input */ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; @@ -4511,20 +4514,10 @@ static void vlv_update_pll(struct intel_crtc *crtc) dpll |= DPLL_VCO_ENABLE; crtc->config.dpll_hw_state.dpll = dpll; - I915_WRITE(DPLL(pipe), dpll); - POSTING_READ(DPLL(pipe)); - udelay(150); - - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) - DRM_ERROR("DPLL %d failed to lock\n", pipe); - dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; crtc->config.dpll_hw_state.dpll_md = dpll_md; - I915_WRITE(DPLL_MD(pipe), dpll_md); - POSTING_READ(DPLL_MD(pipe)); - if (crtc->config.has_dp_encoder) intel_dp_set_m_n(crtc); -- cgit 
v1.2.3 From 3ad8a208abbe1bdfe31512053a81ac4938aed447 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:32 +0200 Subject: drm/i915: Fix up cpt pixel multiplier enable sequence Bspec for the "DPLL HDMI multiplier" field says: "Restriction : The DPLL must be enabled and stable before setting these bits. These bits must be programmed after DPLL_SEL is programmed." There is apparently no restriction on programming the DPLL_SEL register wrt the DPLL. So let's just move that up before we enable the pch dpll. Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index abaae78f0c52..3d1c97abee28 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3004,15 +3004,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_shared_dpll tries to do the right thing, but - * get_shared_dpll unconditionally resets the pll - we need that to have - * the right LVDS enable sequence. */ - ironlake_enable_shared_dpll(intel_crtc); - + /* We need to program the right clock selection before writing the pixel + * mutliplier into the DPLL. */ if (HAS_PCH_CPT(dev)) { u32 sel; @@ -3026,6 +3019,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(PCH_DPLL_SEL, temp); } + /* XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoder that already use the clock when we share it. + * + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that to have + * the right LVDS enable sequence. */ + ironlake_enable_shared_dpll(intel_crtc); + /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); ironlake_pch_transcoder_set_timings(intel_crtc, pipe); -- cgit v1.2.3 From 50b44a449ff1a19712ebc36ffccf9ac0a68033bf Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jun 2013 13:34:33 +0200 Subject: drm/i915: clear DPLL reg when disabling i9xx dplls Toghether with the hw state readout this should catch cases where we don't properly updated the pll state (either in sw or hw). At least for the shared dpll code the equivalent tricke helped a lot in catching bugs. Also rename the function prefix, it's not a generic piece of infrastructure. Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3d1c97abee28..ae3dc5d1ff52 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1406,7 +1406,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) } /** - * intel_disable_pll - disable a PLL + * i9xx_disable_pll - disable a PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to disable * @@ -1414,11 +1414,8 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) * * Note! This is for pre-ILK only. 
*/ -static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { - int reg; - u32 val; - /* Don't disable pipe A or pipe A PLLs if needed */ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; @@ -1426,11 +1423,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, pipe); - reg = DPLL(pipe); - val = I915_READ(reg); - val &= ~DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(DPLL(pipe), 0); + POSTING_READ(DPLL(pipe)); } void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) @@ -3782,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - intel_disable_pll(dev_priv, pipe); + i9xx_disable_pll(dev_priv, pipe); intel_crtc->active = false; intel_update_fbc(dev); -- cgit v1.2.3 From 0d8ff15e9a15f2b393e53337a107b7a1e5919b6d Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 4 Jul 2013 11:02:03 -0700 Subject: drm/i915/hsw: Set correct Haswell PTE encodings. The cacheability controls have changed, and the bits have been rearranged in general. Note that age 0 is the oldest (most likely to get evicted) and age 3 is the youngest (most likely to stick around for a bit). We've picked 0 for no reason, but atm it shouldn't matter anyway (since we don't yet try to differentiate between different objects). v2: Remove comments for snb/ivb cache leves, that's a separate change. v3: Resolve conflicts due to patch series reordering. v4: Rebased on top of Kenneth Graunke's ->pte_encode refactoring. v5: Removed eLLC bits for separate patch. In the internal repository this was: Signed-off-by: Ben Widawsky Signed-off-by: Kenneth Graunke Reviewed-by: Damien Lespiau [danvet: Add comment about cache ages as requested by Ben provoked due to a question from Damien.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 242d0f9bb9e4..5534dd5cea58 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -33,6 +33,7 @@ /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) +#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) #define GEN6_PDE_VALID (1 << 0) /* gen6+ has bit 11-4 for physical addr bit 39-32 */ @@ -44,6 +45,14 @@ #define GEN6_PTE_CACHE_LLC (2 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) + +/* Cacheability Control is a 4-bit value. The low three bits are stored in * + * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. 
+ */ +#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ + (((bits) & 0x8) << (11 - 3))) +#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, enum i915_cache_level level) @@ -92,10 +101,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); + pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) - pte |= GEN6_PTE_CACHE_LLC; + pte |= HSW_WB_LLC_AGE0; return pte; } -- cgit v1.2.3 From 05e21cc43da5a1a58b34a2cfad13d22bcfeb1f2b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 4 Jul 2013 11:02:04 -0700 Subject: drm/i915: Define some of the eLLC magic The EDRAM present register isn't really defined in the docs. It just says check to see if it's set to 1. So I haven't defined the 1 value not knowing what it actually means. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 20b10a0fa452..1246dfd5ff92 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4147,8 +4147,8 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; - if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) - I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); + if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) + I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); if (HAS_PCH_NOP(dev)) { u32 temp = I915_READ(GEN7_MSG_CTL); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index dc3d6a74f391..5e58a44c5fe3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4472,6 +4472,10 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define HSW_IDICR 0x9008 +#define IDIHASHMSK(x) (((x) & 0x3f) << 16) +#define HSW_EDRAM_PRESENT 0x120010 + #define GEN6_UCGCTL1 0x9400 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) -- cgit v1.2.3 From 59124506ba5297e48410e410c3bce83784fddf58 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 4 Jul 2013 11:02:05 -0700 Subject: drm/i915: store eLLC size The eLLC cannot be determined by PCIID because as far as we know, even machines supporting eLLC may not have it enabled, or fused off or whatever. It's possible this isn't actually true, and at that point we can switch to a DEV_INFO flag instead. I've defined everything where the docs are clear, and left the rest as magic. But we need it before we set the pte_encode function pointers, which happens really early, in gtt_init. The problem with just doing the normal sequence earlier is we don't have the ability to use forcewake until after the pte functions have been set up. Since all solutions are somewhat ugly (barring rewriting all the init ordering), I've opted to do the detection really early, and the enabling later - since the register to detect doesn't require forcewake. Signed-off-by: Ben Widawsky [danvet: Move dev_priv->ellc_size away from the dri1 dungeon to a nice place right next to the l3 parity stuff. Also squash in the follow-up commit to read out the eLLC size a bit earlier.] 
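Editor's note: the split described above boils down to two phases. The sketch below condenses the hunks of this patch and the previous one (all names and values are taken from those diffs): detection happens where forcewake is not yet available, and the actual IDICR enable is deferred to GEM init.

    /* Phase 1: early in i915_driver_load(), before the gt funcs are set up.
     * HSW_EDRAM_PRESENT only says "there", so the size is hardcoded. */
    if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1))
            dev_priv->ellc_size = 128;      /* MB */

    /* Phase 2: later in i915_gem_init_hw(), once register access is fully up.
     * IDIHASHMSK(0xf) == ((0xf & 0x3f) << 16) == 0xf0000, i.e. the old magic
     * number written to 0x9008 before these defines existed. */
    if (dev_priv->ellc_size)
            I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));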
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 10 ++++++++++ drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6ce903306320..a1d04b23e576 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1519,6 +1519,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_early_sanitize_regs(dev); + if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { + /* The docs do not explain exactly how the calculation can be + * made. It is somewhat guessable, but for now, it's always + * 128MB. + * NB: We can't write IDICR yet because we do not have gt funcs + * set up */ + dev_priv->ellc_size = 128; + DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); + } + ret = i915_gem_gtt_init(dev); if (ret) goto put_bridge; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 65d54edae176..5d4491e74871 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1151,6 +1151,9 @@ typedef struct drm_i915_private { struct intel_l3_parity l3_parity; + /* Cannot be determined by PCIID. You must always read a register. */ + size_t ellc_size; + /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1246dfd5ff92..53c39bcf8e0e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4147,7 +4147,7 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; - if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) + if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); if (HAS_PCH_NOP(dev)) { -- cgit v1.2.3 From 4d15c145a6234d999c0452eec0d275c1fbf0688c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 4 Jul 2013 11:02:06 -0700 Subject: drm/i915: Use eLLC/LLC by default when available DRI clients really should be using MOCS to get fine grained streaming cache controls. With that note, I *hope* that this patch doesn't improve performance overwhelmingly, because if it does - it means there is a problem elsewhere. In any case, the kernel, and old userspace should get some benefit from this, so let's do it. eLLC is always a good default, and really not using it is the special case for MOCS. 
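Editor's note: the only functional difference between hsw_pte_encode() and the new iris_pte_encode() added below is which cacheability-control value gets packed into the PTE. The packing macro comes from the earlier Haswell PTE patch; the worked values here are computed by hand from it:

    /* From the Haswell PTE patch above: the low 3 bits of the 4-bit value go
     * into PTE bits 3:1, the top bit goes into PTE bit 11. */
    #define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
                                            (((bits) & 0x8) << (11 - 3)))

    /* HSW_WB_LLC_AGE0      == HSW_CACHEABILITY_CONTROL(0x3) == 0x006
     *   (PTE bits 3:1 = 0b011, bit 11 clear)
     * HSW_WB_ELLC_LLC_AGE0 == HSW_CACHEABILITY_CONTROL(0xb) == 0x806
     *   (same low bits, plus bit 11 set for the eLLC+LLC variant) */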
References: http://www.intel.com/newsroom/kits/restricted/ha$well!/pdfs/4th_Gen_Intel_Core_PressBriefing_5-29.pdf (page 57) Signed-off-by: Ben Widawsky Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5534dd5cea58..422273328302 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -53,6 +53,7 @@ #define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ (((bits) & 0x8) << (11 - 3))) #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) +#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, enum i915_cache_level level) @@ -109,6 +110,18 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, return pte; } +static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level) +{ + gen6_gtt_pte_t pte = GEN6_PTE_VALID; + pte |= HSW_PTE_ADDR_ENCODE(addr); + + if (level != I915_CACHE_NONE) + pte |= HSW_WB_ELLC_LLC_AGE0; + + return pte; +} + static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; @@ -861,7 +874,9 @@ int i915_gem_gtt_init(struct drm_device *dev) } else { gtt->gtt_probe = gen6_gmch_probe; gtt->gtt_remove = gen6_gmch_remove; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) && dev_priv->ellc_size) + gtt->pte_encode = iris_pte_encode; + else if (IS_HASWELL(dev)) gtt->pte_encode = hsw_pte_encode; else if (IS_VALLEYVIEW(dev)) gtt->pte_encode = byt_pte_encode; -- cgit v1.2.3 From 63573eb7ba3d1bdc1db25fe79314609a4189a306 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 4 Jul 2013 11:02:07 -0700 Subject: drm/i915: debugfs entries for [e]LLC To make users life a little easier figuring out what they have on their system. Ideally, I'd really like to report LLC size, but it turned out to be a bit of a pain. Maybe I'll revisit it in the future. Signed-off-by: Ben Widawsky Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 86379799dab8..8819f851e996 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1530,6 +1530,19 @@ static int i915_dpio_info(struct seq_file *m, void *data) return 0; } +static int i915_llc(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Size calculation for LLC is a bit of a pain. Ignore for now. 
*/ + seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); + seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); + + return 0; +} + static int i915_wedged_get(void *data, u64 *val) { @@ -1959,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_swizzle_info", i915_swizzle_info, 0}, {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_dpio", i915_dpio_info, 0}, + {"i915_llc", i915_llc, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) -- cgit v1.2.3 From d18ea1b58a5003eb6fca03aff03c4c01321e6cb1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 12 Jul 2013 22:43:25 +0200 Subject: drm/i915: unify PM interrupt preinstall sequence Since the addition of VECS we have a slightly different enable sequence for PM interrupts on ivb/hsw vs snb and vlv. Usually that will end up in hard to track down surprises. Hence unifiy things and since we have copies of this code in 3 places now, extract it into its own little helper. Note that this changes the irq preinstall sequence a bit for snb and vlv: We now also clear the PM registers in the preinstall hook, in addition to the PM register clearing/setup already done when actually enabling rps. So this doesn't fix a bug but simply unifies the code across all platforms. After the postinstall hook is similarly unified we can rip out the then redundant PM interrupt setup from the rps code. v3: Rebase on top of the retained double-GTIIR clearing. Also resurrect the masking/disabling of the gen6+ PM interrupts as spotted by Ben Widaswky. v4: Move the DE interrupt reset code out of gen5_gt_irq_preinstall back to ironlake_irq_preinstall where it really belongs. Spotted by Paulo. v3: Improve the commit message to more clearly spell out why we want to unify the code and what exactly changes. Cc: Paulo Zanoni Reviewed-by: Ben Widawsky [danvet: s/GT/PM/ to fix up a comment which Ben spotted while reviewing.] 
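Editor's note: the register soup in this and the following patch is easier to follow with the roles of the three per-block interrupt registers spelled out. The annotated sketch below is a reader's crib, not part of the patch; the semantics follow the conventions described in the next commit message.

    /* IER: which sources may generate an interrupt at all (normally set up
     *      once in the postinstall hook and then left alone).
     * IMR: which of those are actually reported to the CPU; the only register
     *      toggled at runtime, always under dev_priv->irq_lock.
     * IIR: which sources have fired; bits are cleared by writing them back,
     *      hence the I915_WRITE(GTIIR, I915_READ(GTIIR)) idiom. */
    I915_WRITE(GTIMR, 0xffffffff);          /* mask everything */
    I915_WRITE(GTIER, 0x0);                 /* disable everything */
    POSTING_READ(GTIER);                    /* flush posted writes */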
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 64db680fb7a4..0d54a550ec6d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2054,6 +2054,23 @@ static void ibx_irq_preinstall(struct drm_device *dev) POSTING_READ(SDEIER); } +static void gen5_gt_irq_preinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* and GT */ + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + POSTING_READ(GTIER); + + if (INTEL_INFO(dev)->gen >= 6) { + /* and PM */ + I915_WRITE(GEN6_PMIMR, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0x0); + POSTING_READ(GEN6_PMIER); + } +} + /* drm_dma.h hooks */ static void ironlake_irq_preinstall(struct drm_device *dev) @@ -2064,16 +2081,11 @@ static void ironlake_irq_preinstall(struct drm_device *dev) I915_WRITE(HWSTAM, 0xeffe); - /* XXX hotplug from PCH */ - I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); POSTING_READ(DEIER); - /* and GT */ - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); + gen5_gt_irq_preinstall(dev); ibx_irq_preinstall(dev); } @@ -2092,15 +2104,7 @@ static void ivybridge_irq_preinstall(struct drm_device *dev) I915_WRITE(DEIER, 0x0); POSTING_READ(DEIER); - /* and GT */ - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); - - /* Power management */ - I915_WRITE(GEN6_PMIMR, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0x0); - POSTING_READ(GEN6_PMIER); + gen5_gt_irq_preinstall(dev); ibx_irq_preinstall(dev); } @@ -2121,9 +2125,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev) /* and GT */ I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); + + gen5_gt_irq_preinstall(dev); I915_WRITE(DPINVGTT, 0xff); -- cgit v1.2.3 From 0a9a8c91a5f9617d6fa319fe052de38691fb29cb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 12 Jul 2013 22:43:26 +0200 Subject: drm/i915: unify GT/PM irq postinstall code Again extract a common helper. For the postinstall hook things are a bit more complicated since we have more cases on ilk-hsw/vlv here. But since vlv was clearly broken by failing to initialize dev_priv->gt_irq_mask correctly the shared code is clearly justified. Also kill the PMIER setting in the async rps enable work. I should have been save, but also clearly looked rather fragile. PMIER setup is now all down in the irq pre/postinstall hooks. With this we now have the usual interrupt register sequence for GT/PM irq registers: - IER is setup once with all the interrupts we ever need in the postinstall hook and never touched again. Exceptions are SDEIER, which is touched in the preinstall hook (when the irq handler isn't enabled) and then only from the irq handler. And DEIER/VLV_IER with is used in the irq handler but also written to once in the postinstall hook. But since that write is essentially what enables the interrupt and we should always have MSI interrupts we should be save. In case we ever have non-MSI interrupts we'd be screwed. - IIR is cleared in the postinstall hook before we enable/unmask the respective interrupt sources. Hence we can't steal an interrupt event an accidentally trigger the spurious interrupt logic in the core kernel. 
Note that after some discussion with Ben Widawsky we think that we actually should clear the IIR registers in the preinstall hook. But doing that is a much larger patch series. - IMR regs are (usually) all masked off. Those are the only regs changed at runtime, which is all protected by dev_priv->irq_lock. This unification also kills the cargo-culted read-modify-write PM register setup for VECS. Interrupt setup is done without userspace being able to interfere, so we better know what values we want to put into those registers. RMW cycles otoh are really good at papering over races, until stuff magically blows up and no one has a clue why. v2: Touch the gen6+ PM interrupt registers only on gen6+. v3: Improve the commit message to more clearly spell out why we want to unify the code and what exactly changes. Cc: Ben Widawsky Cc: Paulo Zanoni Reviewed-by: Ben Widawsky [danvet: Add a comment to explain why the l3 parity interrupt is special.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 93 +++++++++++++++++++---------------------- drivers/gpu/drm/i915/intel_pm.c | 4 -- 2 files changed, 43 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0d54a550ec6d..b388641e0606 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2196,6 +2196,46 @@ static void ibx_irq_postinstall(struct drm_device *dev) I915_WRITE(SDEIMR, ~mask); } +static void gen5_gt_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pm_irqs, gt_irqs; + + pm_irqs = gt_irqs = 0; + + dev_priv->gt_irq_mask = ~0; + if (HAS_L3_GPU_CACHE(dev)) { + /* L3 parity interrupt is always unmasked. */ + dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + } + + gt_irqs |= GT_RENDER_USER_INTERRUPT; + if (IS_GEN5(dev)) { + gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | + ILK_BSD_USER_INTERRUPT; + } else { + gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; + } + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + I915_WRITE(GTIER, gt_irqs); + POSTING_READ(GTIER); + + if (INTEL_INFO(dev)->gen >= 6) { + pm_irqs |= GEN6_PM_RPS_EVENTS; + + if (HAS_VEBOX(dev)) + pm_irqs |= PM_VEBOX_USER_INTERRUPT; + + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); + I915_WRITE(GEN6_PMIMR, 0xffffffff); + I915_WRITE(GEN6_PMIER, pm_irqs); + POSTING_READ(GEN6_PMIER); + } +} + static int ironlake_irq_postinstall(struct drm_device *dev) { unsigned long irqflags; @@ -2206,7 +2246,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_POISON; - u32 gt_irqs; dev_priv->irq_mask = ~display_mask; @@ -2217,21 +2256,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); POSTING_READ(DEIER); - dev_priv->gt_irq_mask = ~0; - - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT; - - if (IS_GEN6(dev)) - gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; - else - gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | - ILK_BSD_USER_INTERRUPT; - - I915_WRITE(GTIER, gt_irqs); - POSTING_READ(GTIER); + gen5_gt_irq_postinstall(dev); ibx_irq_postinstall(dev); @@ -2260,8 +2285,6 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) DE_PLANEA_FLIP_DONE_IVB | 
DE_AUX_CHANNEL_A_IVB | DE_ERR_INT_IVB; - u32 pm_irqs = GEN6_PM_RPS_EVENTS; - u32 gt_irqs; dev_priv->irq_mask = ~display_mask; @@ -2276,30 +2299,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) DE_PIPEA_VBLANK_IVB); POSTING_READ(DEIER); - dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIER, gt_irqs); - POSTING_READ(GTIER); - - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); - if (HAS_VEBOX(dev)) - pm_irqs |= PM_VEBOX_USER_INTERRUPT; - - /* Our enable/disable rps functions may touch these registers so - * make sure to set a known state for only the non-RPS bits. - * The RMW is extra paranoia since this should be called after being set - * to a known state in preinstall. - * */ - I915_WRITE(GEN6_PMIMR, - (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); - I915_WRITE(GEN6_PMIER, - (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); - POSTING_READ(GEN6_PMIER); + gen5_gt_irq_postinstall(dev); ibx_irq_postinstall(dev); @@ -2309,7 +2309,6 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 gt_irqs; u32 enable_mask; u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; unsigned long irqflags; @@ -2349,13 +2348,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; - I915_WRITE(GTIER, gt_irqs); - POSTING_READ(GTIER); + gen5_gt_irq_postinstall(dev); /* ack & enable invalid PTE error interrupts */ #if 0 /* FIXME: add support to irq handler for checking these bits */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a9be0d1c173d..96f08724e491 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3319,8 +3319,6 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS); spin_lock_irq(&dev_priv->irq_lock); /* FIXME: Our interrupt enabling sequence is bonghits. * dev_priv->rps.pm_iir really should be 0 here. */ @@ -3599,8 +3597,6 @@ static void valleyview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); -- cgit v1.2.3 From 44fc7d5cf30723563558715f0794c8389a5c15ba Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 12 Jul 2013 22:43:27 +0200 Subject: drm/i915: extract rps interrupt enable/disable helpers The VECS enabling required some changes to how rps interrupts are enabled/disabled since VECS interrupts are handling with the PM interrupt registers. But now that the pre/postinstall sequences is identical for all platforms with rps support (snb, ivb, hsw, vlv) we can also use the exact same sequence to actually enable the rps interrupts. 
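Roughly, that shared enable sequence boils down to the sketch below (simplified for illustration; the authoritative version is the gen6_enable_rps_interrupts() helper added in the diff that follows):

    static void gen6_enable_rps_interrupts(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;

            spin_lock_irq(&dev_priv->irq_lock);
            dev_priv->rps.pm_iir = 0;
            /* unmask the RPS events in PMIMR and ack any stale bits in PMIIR */
            I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
            I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
            spin_unlock_irq(&dev_priv->irq_lock);
            /* unmask all PM interrupts at the top-level mask register */
            I915_WRITE(GEN6_PMINTRMSK, 0);
    }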
Strictly speaking using spinlocks is overkill on snb/ivb & vlv since they have no VECS ring, but imo that's more than made up by the common code. Hence this just unifies the vlv code with the snb-hsw code which matched exactly before the VECS enabling. See commit eda63ffb906c2fb3b609a0e87aeb63c0f25b9e6b Author: Ben Widawsky Date: Tue May 28 19:22:26 2013 -0700 drm/i915: Add PM regs to pre/post install and commit 4848405cced3b46f4ec7d404b8ed5873171ae10a Author: Ben Widawsky Date: Tue May 28 19:22:27 2013 -0700 drm/i915: make PM interrupt writes non-destructive for why the gen6 code (shared between snb, ivb and hsw) needed to be changed originally. v3: Improve the commit message to more clearly spell out why we want to unify the code and what exactly changes. Cc: Paulo Zanoni Cc: Ben Widawsky Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 59 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 96f08724e491..787a528e7200 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3121,13 +3121,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); } - -static void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); /* Complete PM interrupt masking here doesn't race with the rps work @@ -3142,23 +3139,23 @@ static void gen6_disable_rps(struct drm_device *dev) I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); } -static void valleyview_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->irq_lock); + gen6_disable_rps_interrupts(dev); +} + +static void valleyview_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); + gen6_disable_rps_interrupts(dev); if (dev_priv->vlv_pctx) { drm_gem_object_unreference(&dev_priv->vlv_pctx->base); @@ -3191,6 +3188,21 @@ int intel_enable_rc6(const struct drm_device *dev) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } +static void gen6_enable_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + /* FIXME: Our interrupt enabling sequence is bonghits. + * dev_priv->rps.pm_iir really should be 0 here. 
*/ + dev_priv->rps.pm_iir = 0; + I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); + I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(&dev_priv->irq_lock); + /* unmask all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); +} + static void gen6_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3319,15 +3331,7 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); - spin_lock_irq(&dev_priv->irq_lock); - /* FIXME: Our interrupt enabling sequence is bonghits. - * dev_priv->rps.pm_iir really should be 0 here. */ - dev_priv->rps.pm_iir = 0; - I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); - I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&dev_priv->irq_lock); - /* unmask all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); + gen6_enable_rps_interrupts(dev); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); @@ -3597,12 +3601,7 @@ static void valleyview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); - spin_lock_irq(&dev_priv->irq_lock); - WARN_ON(dev_priv->rps.pm_iir != 0); - I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->irq_lock); - /* enable all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); + gen6_enable_rps_interrupts(dev); gen6_gt_force_wake_put(dev_priv); } -- cgit v1.2.3 From a0b3335a2141aadb8f2398ade97fe574f2ddc875 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Jul 2013 23:35:34 +0200 Subject: drm/i915: simplify rps interrupt enabling/disabling sequence At the moment we have the following interrupt enabling sequence: 1. irq preinstall hook 2. enabling the interrupt handler and calling irq postinstall hook 3. enable rps interrupts from the async work And the following disable sequence: 1. disabling the interrupt handler and calling the uninstall hook 2. disabling the rps interrupts Since the postinstall hook now always sets up PMIIR, PMIER and PMIMR to known-good states there is no way for an interrupt to sneak in during the enable sequence, so we can reinstate the WARN lost in commit eda63ffb906c2fb3b609a0e87aeb63c0f25b9e6b Author: Ben Widawsky Date: Tue May 28 19:22:26 2013 -0700 drm/i915: Add PM regs to pre/post install Note that there's some room for future cleanups since most of the interrupt register clearing in the disable function is rather redundant. But that's better done in follow-up patches, if at all. Cc: Ben Widawsky Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 787a528e7200..bc5aae088550 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3193,9 +3193,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; spin_lock_irq(&dev_priv->irq_lock); - /* FIXME: Our interrupt enabling sequence is bonghits. - * dev_priv->rps.pm_iir really should be 0 here. 
*/ - dev_priv->rps.pm_iir = 0; + WARN_ON(dev_priv->rps.pm_iir); I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); -- cgit v1.2.3 From 7457d61748f7939dea49849db442cb3df4c7c3fe Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 7 Jun 2013 17:41:07 +0100 Subject: drm/i915: We implement WaFbcWaitForVBlankBeforeEnable for ilk and snb We also wait for that blank on other platforms but the w/a doesn't apply there. Not an issue at all. Signed-off-by: Damien Lespiau Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index bc5aae088550..cae89fe9b84f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -404,6 +404,8 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) * following the termination of the page-flipping sequence * and indeed performing the enable as a co-routine and not * waiting synchronously upon the vblank. + * + * WaFbcWaitForVBlankBeforeEnable:ilk,snb */ schedule_delayed_work(&work->work, msecs_to_jiffies(50)); } -- cgit v1.2.3 From 4bb353343dcc2486a1deda87f4c069153dc353c3 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 14 Jun 2013 15:23:24 +0100 Subject: drm/i915: We implement WaFbcAsynchFlipDisableFbcQueue on ilk and snb v2: Put the comment a bit closer to the actual write (Paulo Zanoni) Reviewed-by: Paulo Zanoni Signed-off-by: Damien Lespiau [danvet: Fix space before tab.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cae89fe9b84f..d212e42638d9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4453,6 +4453,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev) * The bit 7,8,9 of 0x42020. 
*/ if (IS_IRONLAKE_M(dev)) { + /* WaFbcAsynchFlipDisableFbcQueue:ilk */ I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); @@ -4589,6 +4590,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) * The bit5 and bit7 of 0x42020 * The bit14 of 0x70180 * The bit14 of 0x71180 + * + * WaFbcAsynchFlipDisableFbcQueue:snb */ I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | -- cgit v1.2.3 From f1e8fa56fd650e0a23f64afdf59f3907d9a89615 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 7 Jun 2013 17:41:09 +0100 Subject: drm/i915: We implement WaFbcDisableDpfcClockGating on ilk Signed-off-by: Damien Lespiau Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d212e42638d9..9b95f78d352a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4416,7 +4416,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; - /* Required for FBC */ + /* + * Required for FBC + * WaFbcDisableDpfcClockGating:ilk + */ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; -- cgit v1.2.3 From 19bc678a6066d4ecca938c50ac2f9e9ccfb0ddbe Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 7 Jun 2013 17:41:10 +0100 Subject: drm/i915: We implement WaMPhyProgramming on Haswell Signed-off-by: Damien Lespiau Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ae3dc5d1ff52..3cda57d7b3f0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5166,7 +5166,10 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) BUG_ON(val != final); } -/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ +/* + * Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. + * WaMPhyProgramming:hsw + */ static void lpt_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; -- cgit v1.2.3 From eb4926e4a6e3922398fd6880f07a84db95aa3741 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Fri, 7 Jun 2013 17:41:14 +0100 Subject: drm/i915: Don't try to calculate RC6 residency on GEN4 and before intel_enable_rc6() is used to check if we can compute the RC6 residency in the sysfs code. Disable this for platforms older than Ironlake. 
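For illustration, the sysfs residency code can then use the return value as a simple guard, roughly along these lines (a sketch only; the helper and attribute names here are assumptions, not the literal sysfs code):

    /* Sketch: only read the RC6 residency counter when RC6 can actually
     * be enabled on this platform; intel_enable_rc6() returns 0 on
     * gen < 5 after this patch. */
    static ssize_t show_rc6_ms_sketch(struct drm_device *dev, char *buf)
    {
            u32 rc6_ms = 0;

            if (intel_enable_rc6(dev))
                    rc6_ms = calc_residency(dev, GEN6_GT_GFX_RC6); /* assumed helper */
            return snprintf(buf, PAGE_SIZE, "%u\n", rc6_ms);
    }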
Signed-off-by: Damien Lespiau Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9b95f78d352a..e81d37667bc9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3167,6 +3167,10 @@ static void valleyview_disable_rps(struct drm_device *dev) int intel_enable_rc6(const struct drm_device *dev) { + /* No RC6 before Ironlake */ + if (INTEL_INFO(dev)->gen < 5) + return 0; + /* Respect the kernel parameter if it is set */ if (i915_enable_rc6 >= 0) return i915_enable_rc6; -- cgit v1.2.3 From 11fa3384042f5578e0f6179eef70cbcb2892be92 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2013 17:22:06 +0300 Subject: drm/i915: Fix retrieval of hangcheck stats The default context is always supported (as it contains the global hangcheck stats) and the contexts for hangcheck are not limited to any ring. References: https://bugs.freedesktop.org/show_bug.cgi?id=65845 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem_context.c | 23 ++++++++--------------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5d4491e74871..700db059427b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1860,7 +1860,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) } struct i915_ctx_hang_stats * __must_check -i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, +i915_gem_context_get_hang_stats(struct drm_device *dev, struct drm_file *file, u32 id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 2074544682cf..2470206a4d07 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data) } struct i915_ctx_hang_stats * -i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, +i915_gem_context_get_hang_stats(struct drm_device *dev, struct drm_file *file, u32 id) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_context *to; - - if (dev_priv->hw_contexts_disabled) - return ERR_PTR(-ENOENT); - - if (ring->id != RCS) - return ERR_PTR(-EINVAL); - - if (file == NULL) - return ERR_PTR(-EINVAL); + struct i915_hw_context *ctx; if (id == DEFAULT_CONTEXT_ID) return &file_priv->hang_stats; - to = i915_gem_context_get(file->driver_priv, id); - if (to == NULL) + ctx = NULL; + if (!dev_priv->hw_contexts_disabled) + ctx = i915_gem_context_get(file->driver_priv, id); + if (ctx == NULL) return ERR_PTR(-ENOENT); - return &to->hang_stats; + return &ctx->hang_stats; } void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) -- cgit v1.2.3 From e85209698649be30cb1389966f29107d63f16940 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2013 17:22:07 +0300 Subject: drm/i915: Replace open-coding of DEFAULT_CONTEXT_ID The intent of the check is made more clear if we use the proper name for 0 here. 
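For reference, DEFAULT_CONTEXT_ID is simply the named form of that zero, defined alongside the context code as something like:

    #define DEFAULT_CONTEXT_ID 0    /* illustrative; see i915_gem_context.c */

so the checks below read as "only the default context is allowed on this ring" rather than a comparison against a magic number.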
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 64eda4463b70..1b58694d7be7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -873,7 +873,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_BSD: ring = &dev_priv->ring[VCS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; @@ -881,7 +881,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_BLT: ring = &dev_priv->ring[BCS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; @@ -889,7 +889,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_VEBOX: ring = &dev_priv->ring[VECS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; -- cgit v1.2.3 From 10cd45b6e8ac1d1a99f6bdf0e0c80f2a1351f3f5 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 3 Jul 2013 17:22:08 +0300 Subject: drm/i915: introduce i915_queue_hangcheck To run hangcheck in near future. Signed-off-by: Mika Kuoppala Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 6 ++---- drivers/gpu/drm/i915/i915_irq.c | 21 ++++++++++++--------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 700db059427b..c93ab68e4da9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1622,6 +1622,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ +void i915_queue_hangcheck(struct drm_device *dev); void i915_hangcheck_elapsed(unsigned long data); void i915_handle_error(struct drm_device *dev, bool wedged); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 53c39bcf8e0e..df6ba10bf3a0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2083,10 +2083,8 @@ int __i915_add_request(struct intel_ring_buffer *ring, ring->outstanding_lazy_request = 0; if (!dev_priv->ums.mm_suspended) { - if (i915_enable_hangcheck) { - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); - } + i915_queue_hangcheck(ring->dev); + if (was_empty) { queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b388641e0606..99106990a326 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -698,18 +698,13 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (ring->obj == NULL) return; trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); wake_up_all(&ring->irq_queue); - if (i915_enable_hangcheck) { - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - 
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); - } + i915_queue_hangcheck(dev); } static void gen6_pm_rps_work(struct work_struct *work) @@ -2030,9 +2025,17 @@ void i915_hangcheck_elapsed(unsigned long data) if (busy_count) /* Reset timer case chip hangs without another request * being added */ - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + - DRM_I915_HANGCHECK_JIFFIES)); + i915_queue_hangcheck(dev); +} + +void i915_queue_hangcheck(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (!i915_enable_hangcheck) + return; + + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); } static void ibx_irq_preinstall(struct drm_device *dev) -- cgit v1.2.3 From 853ba5d2231619e1c7f7de1269e135174ec8e3cb Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 16 Jul 2013 16:50:05 -0700 Subject: drm/i915: Move gtt and ppgtt under address space umbrella The GTT and PPGTT can be thought of more generally as GPU address spaces. Many of their actions (insert entries), state (LRU lists), and many of their characteristics (size) can be shared. Do that. The change itself doesn't actually impact most of the VMA/VM rework coming up, it just fits in with the grand scheme of abstracting the GPU VM operations. GGTT will usually be a special case where we either know an object must be in the GGTT (dislay engine, workarounds, etc.). The scratch page is left as part of the VM (even though it's currently shared with the ppgtt code) because in the future when we have Full PPGTT, I intend to create a separate scratch page for each. v2: Drop usage of i915_gtt_vm (Daniel) Make cleanup also part of the parent class (Ben) Modified commit msg Rebased v3: Properly share scratch page (Imre) Finish commit message (Daniel, Imre) Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +- drivers/gpu/drm/i915/i915_dma.c | 4 +- drivers/gpu/drm/i915/i915_drv.h | 57 ++++++------- drivers/gpu/drm/i915/i915_gem.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 165 ++++++++++++++++++++---------------- 5 files changed, 123 insertions(+), 111 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8819f851e996..1c697c0ab7e5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -276,8 +276,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) count, size); seq_printf(m, "%zu [%lu] gtt total\n", - dev_priv->gtt.total, - dev_priv->gtt.mappable_end - dev_priv->gtt.start); + dev_priv->gtt.base.total, + dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); seq_putc(m, '\n'); list_for_each_entry_reverse(file, &dev->filelist, lhead) { diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a1d04b23e576..c5fab2f8cd79 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1673,7 +1673,7 @@ out_gem_unload: out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); - dev_priv->gtt.gtt_remove(dev); + dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: @@ -1768,7 +1768,7 @@ int i915_driver_unload(struct drm_device *dev) destroy_workqueue(dev_priv->wq); pm_qos_remove_request(&dev_priv->pm_qos); - dev_priv->gtt.gtt_remove(dev); + dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); if (dev_priv->slab) 
kmem_cache_destroy(dev_priv->slab); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c93ab68e4da9..a2c909107a3e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -446,6 +446,29 @@ enum i915_cache_level { typedef uint32_t gen6_gtt_pte_t; +struct i915_address_space { + struct drm_device *dev; + unsigned long start; /* Start offset always 0 for dri2 */ + size_t total; /* size addr space maps (ex. 2GB for ggtt) */ + + struct { + dma_addr_t addr; + struct page *page; + } scratch; + + /* FIXME: Need a more generic return type */ + gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level); + void (*clear_range)(struct i915_address_space *vm, + unsigned int first_entry, + unsigned int num_entries); + void (*insert_entries)(struct i915_address_space *vm, + struct sg_table *st, + unsigned int first_entry, + enum i915_cache_level cache_level); + void (*cleanup)(struct i915_address_space *vm); +}; + /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a @@ -454,8 +477,7 @@ typedef uint32_t gen6_gtt_pte_t; * the spec. */ struct i915_gtt { - unsigned long start; /* Start offset of used GTT */ - size_t total; /* Total size GTT can map */ + struct i915_address_space base; size_t stolen_size; /* Total size of stolen memory */ unsigned long mappable_end; /* End offset that we can CPU map */ @@ -466,10 +488,6 @@ struct i915_gtt { void __iomem *gsm; bool do_idle_maps; - struct { - dma_addr_t addr; - struct page *page; - } scratch; int mtrr; @@ -477,38 +495,17 @@ struct i915_gtt { int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, size_t *stolen, phys_addr_t *mappable_base, unsigned long *mappable_end); - void (*gtt_remove)(struct drm_device *dev); - void (*gtt_clear_range)(struct drm_device *dev, - unsigned int first_entry, - unsigned int num_entries); - void (*gtt_insert_entries)(struct drm_device *dev, - struct sg_table *st, - unsigned int pg_start, - enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level); }; -#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) +#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) struct i915_hw_ppgtt { - struct drm_device *dev; + struct i915_address_space base; unsigned num_pd_entries; struct page **pt_pages; uint32_t pd_offset; dma_addr_t *pt_dma_addr; - /* pte functions, mirroring the interface of the global gtt. 
*/ - void (*clear_range)(struct i915_hw_ppgtt *ppgtt, - unsigned int first_entry, - unsigned int num_entries); - void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, - struct sg_table *st, - unsigned int pg_start, - enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level); int (*enable)(struct drm_device *dev); - void (*cleanup)(struct i915_hw_ppgtt *ppgtt); }; struct i915_ctx_hang_stats { @@ -1124,7 +1121,7 @@ typedef struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; - struct i915_gtt gtt; + struct i915_gtt gtt; /* VMA representing the global address space */ struct i915_gem_mm mm; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index df6ba10bf3a0..4457d730b2e2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -181,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned += i915_gem_obj_ggtt_size(obj); mutex_unlock(&dev->struct_mutex); - args->aper_size = dev_priv->gtt.total; + args->aper_size = dev_priv->gtt.base.total; args->aper_available_size = args->aper_size - pinned; return 0; @@ -3079,7 +3079,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; size_t gtt_max = map_and_fenceable ? - dev_priv->gtt.mappable_end : dev_priv->gtt.total; + dev_priv->gtt.mappable_end : dev_priv->gtt.base.total; int ret; fence_size = i915_gem_get_gtt_size(dev, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 422273328302..f982bf0de157 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -124,7 +124,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; + struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; gen6_gtt_pte_t __iomem *pd_addr; uint32_t pd_entry; int i; @@ -203,18 +203,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev) } /* PPGTT support for Sandybdrige/Gen6 and later */ -static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, +static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_entry, unsigned num_entries) { - struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr, scratch_pte; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr, - I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); while (num_entries) { last_pte = first_pte + num_entries; @@ -234,11 +234,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, } } -static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, +static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, unsigned first_entry, enum i915_cache_level cache_level) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; @@ -249,7 +251,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, dma_addr_t 
page_addr; page_addr = sg_page_iter_dma_address(&sg_iter); - pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level); + pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); act_pt++; @@ -261,13 +263,15 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, kunmap_atomic(pt_vaddr); } -static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); int i; if (ppgtt->pt_dma_addr) { for (i = 0; i < ppgtt->num_pd_entries; i++) - pci_unmap_page(ppgtt->dev->pdev, + pci_unmap_page(ppgtt->base.dev->pdev, ppgtt->pt_dma_addr[i], 4096, PCI_DMA_BIDIRECTIONAL); } @@ -281,7 +285,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { - struct drm_device *dev = ppgtt->dev; + struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned first_pd_entry_in_global_pt; int i; @@ -293,17 +297,18 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); if (IS_HASWELL(dev)) { - ppgtt->pte_encode = hsw_pte_encode; + ppgtt->base.pte_encode = hsw_pte_encode; } else if (IS_VALLEYVIEW(dev)) { - ppgtt->pte_encode = byt_pte_encode; + ppgtt->base.pte_encode = byt_pte_encode; } else { - ppgtt->pte_encode = gen6_pte_encode; + ppgtt->base.pte_encode = gen6_pte_encode; } ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; ppgtt->enable = gen6_ppgtt_enable; - ppgtt->clear_range = gen6_ppgtt_clear_range; - ppgtt->insert_entries = gen6_ppgtt_insert_entries; - ppgtt->cleanup = gen6_ppgtt_cleanup; + ppgtt->base.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.cleanup = gen6_ppgtt_cleanup; + ppgtt->base.scratch = dev_priv->gtt.base.scratch; ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, GFP_KERNEL); if (!ppgtt->pt_pages) @@ -334,8 +339,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->pt_dma_addr[i] = pt_addr; } - ppgtt->clear_range(ppgtt, 0, - ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); @@ -368,7 +373,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return -ENOMEM; - ppgtt->dev = dev; + ppgtt->base.dev = dev; if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); @@ -391,7 +396,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return; - ppgtt->cleanup(ppgtt); + ppgtt->base.cleanup(&ppgtt->base); dev_priv->mm.aliasing_ppgtt = NULL; } @@ -399,17 +404,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { - ppgtt->insert_entries(ppgtt, obj->pages, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - cache_level); + ppgtt->base.insert_entries(&ppgtt->base, obj->pages, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, + cache_level); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj) { - ppgtt->clear_range(ppgtt, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + ppgtt->base.clear_range(&ppgtt->base, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, + obj->base.size 
>> PAGE_SHIFT); } extern int intel_iommu_gfx_mapped; @@ -456,8 +461,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct drm_i915_gem_object *obj; /* First fill our portion of the GTT with scratch pages */ - dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, - dev_priv->gtt.total / PAGE_SIZE); + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + dev_priv->gtt.base.start / PAGE_SIZE, + dev_priv->gtt.base.total / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { i915_gem_clflush_object(obj); @@ -486,12 +492,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) * within the global GTT as well as accessible by the GPU through the GMADR * mapped BAR (dev_priv->mm.gtt->gtt). */ -static void gen6_ggtt_insert_entries(struct drm_device *dev, +static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int first_entry, enum i915_cache_level level) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t __iomem *gtt_entries = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; @@ -500,8 +506,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(dev_priv->gtt.pte_encode(addr, level), - >t_entries[i]); + iowrite32(vm->pte_encode(addr, level), >t_entries[i]); i++; } @@ -512,8 +517,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, * hardware should work, we must keep this posting read for paranoia. */ if (i != 0) - WARN_ON(readl(>t_entries[i-1]) - != dev_priv->gtt.pte_encode(addr, level)); + WARN_ON(readl(>t_entries[i-1]) != + vm->pte_encode(addr, level)); /* This next bit makes the above posting read even more important. 
We * want to flush the TLBs only after we're certain all the PTE updates @@ -523,11 +528,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, POSTING_READ(GFX_FLSH_CNTL_GEN6); } -static void gen6_ggtt_clear_range(struct drm_device *dev, +static void gen6_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; @@ -538,15 +543,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr, - I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); readl(gtt_base); } -static void i915_ggtt_insert_entries(struct drm_device *dev, +static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int pg_start, enum i915_cache_level cache_level) @@ -558,7 +562,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev, } -static void i915_ggtt_clear_range(struct drm_device *dev, +static void i915_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries) { @@ -571,10 +575,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; - dev_priv->gtt.gtt_insert_entries(dev, obj->pages, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - cache_level); + dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, + entry, + cache_level); obj->has_global_gtt_mapping = 1; } @@ -583,10 +588,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; - dev_priv->gtt.gtt_clear_range(obj->base.dev, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + entry, + obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; } @@ -663,20 +669,23 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, obj->has_global_gtt_mapping = 1; } - dev_priv->gtt.start = start; - dev_priv->gtt.total = end - start; + dev_priv->gtt.base.start = start; + dev_priv->gtt.base.total = end - start; /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, hole_start, hole_end) { + const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, - (hole_end-hole_start) / PAGE_SIZE); + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + hole_start / PAGE_SIZE, + count); } /* And finally clear the reserved guard page */ - dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + end / PAGE_SIZE - 1, 1); } static bool @@ -699,7 +708,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) struct 
drm_i915_private *dev_priv = dev->dev_private; unsigned long gtt_size, mappable_size; - gtt_size = dev_priv->gtt.total; + gtt_size = dev_priv->gtt.base.total; mappable_size = dev_priv->gtt.mappable_end; if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { @@ -744,8 +753,8 @@ static int setup_scratch_page(struct drm_device *dev) #else dma_addr = page_to_phys(page); #endif - dev_priv->gtt.scratch.page = page; - dev_priv->gtt.scratch.addr = dma_addr; + dev_priv->gtt.base.scratch.page = page; + dev_priv->gtt.base.scratch.addr = dma_addr; return 0; } @@ -753,11 +762,13 @@ static int setup_scratch_page(struct drm_device *dev) static void teardown_scratch_page(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - set_pages_wb(dev_priv->gtt.scratch.page, 1); - pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr, + struct page *page = dev_priv->gtt.base.scratch.page; + + set_pages_wb(page, 1); + pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(dev_priv->gtt.scratch.page); - __free_page(dev_priv->gtt.scratch.page); + put_page(page); + __free_page(page); } static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -820,17 +831,18 @@ static int gen6_gmch_probe(struct drm_device *dev, if (ret) DRM_ERROR("Scratch setup failed\n"); - dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; - dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; + dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; return ret; } -static void gen6_gmch_remove(struct drm_device *dev) +static void gen6_gmch_remove(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = dev->dev_private; - iounmap(dev_priv->gtt.gsm); - teardown_scratch_page(dev_priv->dev); + + struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); + iounmap(gtt->gsm); + teardown_scratch_page(vm->dev); } static int i915_gmch_probe(struct drm_device *dev, @@ -851,13 +863,13 @@ static int i915_gmch_probe(struct drm_device *dev, intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); - dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; - dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; + dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; return 0; } -static void i915_gmch_remove(struct drm_device *dev) +static void i915_gmch_remove(struct i915_address_space *vm) { intel_gmch_remove(); } @@ -870,27 +882,30 @@ int i915_gem_gtt_init(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 5) { gtt->gtt_probe = i915_gmch_probe; - gtt->gtt_remove = i915_gmch_remove; + gtt->base.cleanup = i915_gmch_remove; } else { gtt->gtt_probe = gen6_gmch_probe; - gtt->gtt_remove = gen6_gmch_remove; + gtt->base.cleanup = gen6_gmch_remove; if (IS_HASWELL(dev) && dev_priv->ellc_size) - gtt->pte_encode = iris_pte_encode; + gtt->base.pte_encode = iris_pte_encode; else if (IS_HASWELL(dev)) - gtt->pte_encode = hsw_pte_encode; + gtt->base.pte_encode = hsw_pte_encode; else if (IS_VALLEYVIEW(dev)) - gtt->pte_encode = byt_pte_encode; + gtt->base.pte_encode = byt_pte_encode; else - gtt->pte_encode = gen6_pte_encode; + gtt->base.pte_encode = gen6_pte_encode; } - ret = gtt->gtt_probe(dev, >t->total, >t->stolen_size, + ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, >t->mappable_base, >t->mappable_end); if (ret) return ret; + gtt->base.dev = 
dev; + /* GMADR is the PCI mmio aperture into the global GTT. */ - DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20); + DRM_INFO("Memory usable by graphics device = %zdM\n", + gtt->base.total >> 20); DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); -- cgit v1.2.3 From 93bd8649dba3155d1a0ba2a902d9c49f1c75a1da Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 16 Jul 2013 16:50:06 -0700 Subject: drm/i915: Put the mm in the parent address space Every address space should support object allocation. It therefore makes sense to have the allocator be part of the "superclass" which GGTT and PPGTT will derive. Since our maximum address space size is only 2GB we're not yet able to avoid doing allocation/eviction; but we'd hope one day this becomes almost irrelvant. v2: Rebased Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 3 +-- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 10 +++++----- drivers/gpu/drm/i915/i915_gem_gtt.c | 17 +++++++++++------ drivers/gpu/drm/i915/i915_gem_stolen.c | 4 ++-- 6 files changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c5fab2f8cd79..5dd4fa5ab89d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1358,7 +1358,7 @@ cleanup_gem: i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); i915_gem_cleanup_aliasing_ppgtt(dev); - drm_mm_takedown(&dev_priv->mm.gtt_space); + drm_mm_takedown(&dev_priv->gtt.base.mm); cleanup_irq: drm_irq_uninstall(dev); cleanup_gem_stolen: @@ -1758,7 +1758,7 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } - drm_mm_takedown(&dev_priv->mm.gtt_space); + drm_mm_takedown(&dev_priv->gtt.base.mm); if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a2c909107a3e..7839b3a485aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -447,6 +447,7 @@ enum i915_cache_level { typedef uint32_t gen6_gtt_pte_t; struct i915_address_space { + struct drm_mm mm; struct drm_device *dev; unsigned long start; /* Start offset always 0 for dri2 */ size_t total; /* size addr space maps (ex. 2GB for ggtt) */ @@ -831,8 +832,6 @@ struct intel_l3_parity { struct i915_gem_mm { /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; - /** Memory allocator for GTT */ - struct drm_mm gtt_space; /** List of all objects in gtt_space. 
Used to restore gtt * mappings on resume */ struct list_head bound_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4457d730b2e2..8f37229a3b52 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3121,7 +3121,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); search_free: - ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, + ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, &obj->gtt_space, size, alignment, obj->cache_level, 0, gtt_max); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 5f8afc48bb7e..f1c9ab096b00 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -78,12 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, INIT_LIST_HEAD(&unwind_list); if (mappable) - drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level, - 0, dev_priv->gtt.mappable_end); + drm_mm_init_scan_with_range(&dev_priv->gtt.base.mm, min_size, + alignment, cache_level, 0, + dev_priv->gtt.mappable_end); else - drm_mm_init_scan(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level); + drm_mm_init_scan(&dev_priv->gtt.base.mm, min_size, alignment, + cache_level); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f982bf0de157..999ecfecb32e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -269,6 +269,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) container_of(vm, struct i915_hw_ppgtt, base); int i; + drm_mm_takedown(&ppgtt->base.mm); + if (ppgtt->pt_dma_addr) { for (i = 0; i < ppgtt->num_pd_entries; i++) pci_unmap_page(ppgtt->base.dev->pdev, @@ -382,8 +384,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (ret) kfree(ppgtt); - else + else { dev_priv->mm.aliasing_ppgtt = ppgtt; + drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, + ppgtt->base.total); + } return ret; } @@ -651,9 +656,9 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, BUG_ON(mappable_end > end); /* Subtract the guard page ... 
*/ - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); + drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE); if (!HAS_LLC(dev)) - dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; + dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { @@ -662,7 +667,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, i915_gem_obj_ggtt_offset(obj), obj->base.size); WARN_ON(i915_gem_obj_ggtt_bound(obj)); - ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, + ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &obj->gtt_space); if (ret) DRM_DEBUG_KMS("Reservation failed\n"); @@ -673,7 +678,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, dev_priv->gtt.base.total = end - start; /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, + drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm, hole_start, hole_end) { const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", @@ -727,7 +732,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) return; DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); - drm_mm_takedown(&dev_priv->mm.gtt_space); + drm_mm_takedown(&dev_priv->gtt.base.mm); gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 76c3b8699168..5d38cb0cd1ce 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -396,8 +396,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, */ obj->gtt_space.start = gtt_offset; obj->gtt_space.size = size; - if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { - ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, + if (drm_mm_initialized(&dev_priv->gtt.base.mm)) { + ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &obj->gtt_space); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); -- cgit v1.2.3 From a7bbbd63e79a89b3e7b77eb734f2773ad69a2a43 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 16 Jul 2013 16:50:07 -0700 Subject: drm/i915: Create a global list of vms After we plumb our code to support multiple address spaces (VMs), there are a few situations where we want to be able to traverse the list of all address spaces in the system. Cases like eviction, or error state collection are obvious example. v2: Delete the global link instead of the list head. While this in and of itself shouldn't be really be a problem, doing this allows us to WARN on an non-empty list, which is a problem. 
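With the new list in place, code that needs to visit every address space (eviction, error state capture and the like) can simply walk it; a minimal sketch, assuming only the fields added by this patch and the standard list helpers:

    struct i915_address_space *vm;

    list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
            /* inspect or clean up per-VM state here, e.g. for eviction
             * or error state collection */
    }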
(Daniel) Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5dd4fa5ab89d..fd52de77a33f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1492,6 +1492,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) i915_dump_device_info(dev_priv); + INIT_LIST_HEAD(&dev_priv->vm_list); + INIT_LIST_HEAD(&dev_priv->gtt.base.global_link); + list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list); + if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; @@ -1758,6 +1762,8 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } + list_del(&dev_priv->gtt.base.global_link); + WARN_ON(!list_empty(&dev_priv->vm_list)); drm_mm_takedown(&dev_priv->gtt.base.mm); if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7839b3a485aa..1e1664e8a599 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -449,6 +449,7 @@ typedef uint32_t gen6_gtt_pte_t; struct i915_address_space { struct drm_mm mm; struct drm_device *dev; + struct list_head global_link; unsigned long start; /* Start offset always 0 for dri2 */ size_t total; /* size addr space maps (ex. 2GB for ggtt) */ @@ -1120,6 +1121,7 @@ typedef struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; + struct list_head vm_list; /* Global list of all address spaces */ struct i915_gtt gtt; /* VMA representing the global address space */ struct i915_gem_mm mm; -- cgit v1.2.3 From 5cef07e1628300aeda9ac9dae95a2b406175b3ff Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 16 Jul 2013 16:50:08 -0700 Subject: drm/i915: Move active/inactive lists to new mm Shamelessly manipulated out of Daniel :-) "When moving the lists around explain that the active/inactive stuff is used by eviction when we run out of address space, so needs to be per-vma and per-address space. Bound/unbound otoh is used by the shrinker which only cares about the amount of memory used and not one bit about in which address space this memory is all used in. Of course to actual kick out an object we need to unbind it from every address space, but for that we have the per-object list of vmas." v2: Leave the bound list as a global one. (Chris, indirectly) v3: Rebased with no i915_gtt_vm. In most places I added a new *vm local, since it will eventually be replaces by a vm argument. Put comment back inline, since it no longer makes sense to do otherwise. 
v4: Rebased on hangcheck/error state movement Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 16 +++++++----- drivers/gpu/drm/i915/i915_drv.h | 46 +++++++++++++++++----------------- drivers/gpu/drm/i915/i915_gem.c | 33 ++++++++++++------------ drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 18 ++++++------- drivers/gpu/drm/i915/i915_gem_stolen.c | 3 ++- drivers/gpu/drm/i915/i915_gpu_error.c | 8 +++--- 7 files changed, 67 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1c697c0ab7e5..a9246e9c5f9d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -135,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) uintptr_t list = (uintptr_t) node->info_ent->data; struct list_head *head; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; size_t total_obj_size, total_gtt_size; int count, ret; @@ -147,11 +148,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) switch (list) { case ACTIVE_LIST: seq_puts(m, "Active:\n"); - head = &dev_priv->mm.active_list; + head = &vm->active_list; break; case INACTIVE_LIST: seq_puts(m, "Inactive:\n"); - head = &dev_priv->mm.inactive_list; + head = &vm->inactive_list; break; default: mutex_unlock(&dev->struct_mutex); @@ -219,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) u32 count, mappable_count, purgeable_count; size_t size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_file *file; int ret; @@ -236,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.active_list, mm_list); + count_objects(&vm->active_list, mm_list); seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.inactive_list, mm_list); + count_objects(&vm->inactive_list, mm_list); seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); @@ -1625,6 +1627,7 @@ i915_drop_caches_set(void *data, u64 val) struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj, *next; + struct i915_address_space *vm = &dev_priv->gtt.base; int ret; DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); @@ -1645,7 +1648,8 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_retire_requests(dev); if (val & DROP_BOUND) { - list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) + list_for_each_entry_safe(obj, next, &vm->inactive_list, + mm_list) if (obj->pin_count == 0) { ret = i915_gem_object_unbind(obj); if (ret) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1e1664e8a599..ee21af3a17ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -458,6 +458,29 @@ struct i915_address_space { struct page *page; } scratch; + /** + * List of objects currently involved in rendering. 
+ * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * LRU list of objects which are not in the ringbuffer and + * are ready to unbind, but are still in the GTT. + * + * last_rendering_seqno is 0 while an object is in this list. + * + * A reference is not held on the buffer while on this list, + * as merely being GTT-bound shouldn't prevent its being + * freed, and we'll pull it off the list in the free path. + */ + struct list_head inactive_list; + /* FIXME: Need a more generic return type */ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level); @@ -852,29 +875,6 @@ struct i915_gem_mm { struct shrinker inactive_shrinker; bool shrinker_no_lock_stealing; - /** - * List of objects currently involved in rendering. - * - * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_rendering_seqno - * represents when the rendering involved will be completed. - * - * A reference is held on the buffer while on this list. - */ - struct list_head active_list; - - /** - * LRU list of objects which are not in the ringbuffer and - * are ready to unbind, but are still in the GTT. - * - * last_rendering_seqno is 0 while an object is in this list. - * - * A reference is not held on the buffer while on this list, - * as merely being GTT-bound shouldn't prevent its being - * freed, and we'll pull it off the list in the free path. - */ - struct list_head inactive_list; - /** LRU list of objects with fence regs on them. */ struct list_head fence_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8f37229a3b52..8830856bf3f9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1692,6 +1692,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, bool purgeable_only) { struct drm_i915_gem_object *obj, *next; + struct i915_address_space *vm = &dev_priv->gtt.base; long count = 0; list_for_each_entry_safe(obj, next, @@ -1705,9 +1706,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, } } - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, - mm_list) { + list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) { if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && i915_gem_object_unbind(obj) == 0 && i915_gem_object_put_pages(obj) == 0) { @@ -1878,6 +1877,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; u32 seqno = intel_ring_get_seqno(ring); BUG_ON(ring == NULL); @@ -1890,7 +1890,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, } /* Move from whatever list we were on to the tail of execution. 
*/ - list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj->mm_list, &vm->active_list); list_move_tail(&obj->ring_list, &ring->active_list); obj->last_read_seqno = seqno; @@ -1914,11 +1914,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&obj->mm_list, &vm->inactive_list); list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2270,6 +2271,7 @@ static void i915_gem_reset_fences(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; struct intel_ring_buffer *ring; int i; @@ -2280,12 +2282,8 @@ void i915_gem_reset(struct drm_device *dev) /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. */ - list_for_each_entry(obj, - &dev_priv->mm.inactive_list, - mm_list) - { + list_for_each_entry(obj, &vm->inactive_list, mm_list) obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; - } /* The fence registers are invalidated so clear them out */ i915_gem_reset_fences(dev); @@ -3076,6 +3074,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; size_t gtt_max = map_and_fenceable ? @@ -3151,7 +3150,7 @@ search_free: } list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_add_tail(&obj->mm_list, &vm->inactive_list); fenceable = i915_gem_obj_ggtt_size(obj) == fence_size && @@ -3299,7 +3298,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) /* And bump the LRU for this access */ if (i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&obj->mm_list, + &dev_priv->gtt.base.inactive_list); return 0; } @@ -4242,7 +4242,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } - BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4320,8 +4320,8 @@ i915_gem_load(struct drm_device *dev) SLAB_HWCACHE_ALIGN, NULL); - INIT_LIST_HEAD(&dev_priv->mm.active_list); - INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->gtt.base.active_list); + INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); @@ -4591,6 +4591,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; int nr_to_scan = sc->nr_to_scan; bool unlock = true; @@ -4619,7 +4620,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if 
(obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list) + list_for_each_entry(obj, &vm->inactive_list, global_list) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 582e6a5f3dac..bf945a39fbb1 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev) } } - list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) { if (obj->base.dev != dev || !atomic_read(&obj->base.refcount.refcount)) { DRM_ERROR("freed inactive %p\n", obj); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index f1c9ab096b00..43b82350d8dc 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -47,6 +47,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, bool mappable, bool nonblocking) { drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; struct list_head eviction_list, unwind_list; struct drm_i915_gem_object *obj; int ret = 0; @@ -78,15 +79,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, INIT_LIST_HEAD(&unwind_list); if (mappable) - drm_mm_init_scan_with_range(&dev_priv->gtt.base.mm, min_size, + drm_mm_init_scan_with_range(&vm->mm, min_size, alignment, cache_level, 0, dev_priv->gtt.mappable_end); else - drm_mm_init_scan(&dev_priv->gtt.base.mm, min_size, alignment, - cache_level); + drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { + list_for_each_entry(obj, &vm->inactive_list, mm_list) { if (mark_free(obj, &unwind_list)) goto found; } @@ -95,7 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, goto none; /* Now merge in the soon-to-be-expired objects... 
*/ - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + list_for_each_entry(obj, &vm->active_list, mm_list) { if (mark_free(obj, &unwind_list)) goto found; } @@ -154,12 +154,13 @@ int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj, *next; bool lists_empty; int ret; - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.active_list)); + lists_empty = (list_empty(&vm->inactive_list) && + list_empty(&vm->active_list)); if (lists_empty) return -ENOSPC; @@ -176,8 +177,7 @@ i915_gem_evict_everything(struct drm_device *dev) i915_gem_retire_requests(dev); /* Having flushed everything, unbind() should never raise an error */ - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, mm_list) + list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) if (obj->pin_count == 0) WARN_ON(i915_gem_object_unbind(obj)); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 5d38cb0cd1ce..90a618335db9 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -348,6 +348,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 size) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; @@ -408,7 +409,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, obj->has_global_gtt_mapping = 1; list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_add_tail(&obj->mm_list, &vm->inactive_list); return obj; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 58386cebb865..d970d84da65f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -622,6 +622,7 @@ static struct drm_i915_error_object * i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; u32 seqno; @@ -641,7 +642,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, } seqno = ring->get_seqno(ring, false); - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + list_for_each_entry(obj, &vm->active_list, mm_list) { if (obj->ring != ring) continue; @@ -773,11 +774,12 @@ static void i915_gem_record_rings(struct drm_device *dev, static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; int i; i = 0; - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) + list_for_each_entry(obj, &vm->active_list, mm_list) i++; error->active_bo_count = i; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) @@ -797,7 +799,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, error->active_bo_count = capture_active_bo(error->active_bo, error->active_bo_count, - &dev_priv->mm.active_list); + &vm->active_list); if (error->pinned_bo) error->pinned_bo_count = -- cgit v1.2.3 From f7f181843e6c24644b4b71b8631a5ea87de05158 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 17 Jul 2013 12:19:02 -0700 Subject: drm/i915: Free stolen node on failed 
preallocation The odds of this happening are *extremely* unlikely. Reported-by: Imre Deak Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 90a618335db9..cb527e7930ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -402,7 +402,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, &obj->gtt_space); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - goto unref_out; + goto err_out; } } @@ -413,7 +413,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, return obj; -unref_out: +err_out: + drm_mm_put_block(stolen); drm_gem_object_unreference(&obj->base); return NULL; } -- cgit v1.2.3 From 2f63315692b1d3c055972ad33fc7168ae908b97b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 17 Jul 2013 12:19:03 -0700 Subject: drm/i915: Create VMAs Formerly: "drm/i915: Create VMAs (part 1)" In a previous patch, the notion of a VM was introduced. A VMA describes an area of part of the VM address space. A VMA is similar to the concept in the linux mm. However, instead of representing regular memory, a VMA is backed by a GEM BO. There may be many VMAs for a given object, one for each VM the object is to be used in. This may occur through flink, dma-buf, or a number of other transient states. Currently the code depends on only 1 VMA per object, for the global GTT (and aliasing PPGTT). The following patches will address this and make the rest of the infrastructure more suited v2: s/i915_obj/i915_gem_obj (Chris) v3: Only move an object to the now global unbound list if there are no more VMAs for the object which are bound into a VM (ie. the list is empty). v4: killed obj->gtt_space some reworks due to rebase v5: Free vma on error path (Imre) v6: Another missed vma free in i915_gem_object_bind_to_gtt error path (Imre) Fixed vma freeing in stolen preallocation (Imre) Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak [danvet: Squash in fixup from Ben to not deref a non-existing vma in set_cache_level, reported by Chris.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 48 +++++++++++++++++----- drivers/gpu/drm/i915/i915_gem.c | 74 +++++++++++++++++++++++++++------- drivers/gpu/drm/i915/i915_gem_evict.c | 12 ++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 ++- drivers/gpu/drm/i915/i915_gem_stolen.c | 15 +++++-- 5 files changed, 120 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ee21af3a17ac..bd1b95ea6a28 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -533,6 +533,17 @@ struct i915_hw_ppgtt { int (*enable)(struct drm_device *dev); }; +/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime + * will always be <= an objects lifetime. So object refcounting should cover us. + */ +struct i915_vma { + struct drm_mm_node node; + struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + + struct list_head vma_link; /* Link in the object's VMA list */ +}; + struct i915_ctx_hang_stats { /* This context had batch pending when hang was declared */ unsigned batch_pending; @@ -1229,8 +1240,9 @@ struct drm_i915_gem_object { const struct drm_i915_gem_object_ops *ops; - /** Current space allocated to this object in the GTT, if any. 
*/ - struct drm_mm_node gtt_space; + /** List of VMAs backed by this object */ + struct list_head vma_list; + /** Stolen memory for this object, instead of being backed by shmem. */ struct drm_mm_node *stolen; struct list_head global_list; @@ -1356,18 +1368,32 @@ struct drm_i915_gem_object { #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -/* Offset of the first PTE pointing to this object */ -static inline unsigned long -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) +/* This is a temporary define to help transition us to real VMAs. If you see + * this, you're either reviewing code, or bisecting it. */ +static inline struct i915_vma * +__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj) { - return o->gtt_space.start; + if (list_empty(&obj->vma_list)) + return NULL; + return list_first_entry(&obj->vma_list, struct i915_vma, vma_link); } /* Whether or not this object is currently mapped by the translation tables */ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) { - return drm_mm_node_allocated(&o->gtt_space); + struct i915_vma *vma = __i915_gem_obj_to_vma(o); + if (vma == NULL) + return false; + return drm_mm_node_allocated(&vma->node); +} + +/* Offset of the first PTE pointing to this object */ +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) +{ + BUG_ON(list_empty(&o->vma_list)); + return __i915_gem_obj_to_vma(o)->node.start; } /* The size used in the translation tables may be larger than the actual size of @@ -1377,14 +1403,15 @@ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) static inline unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) { - return o->gtt_space.size; + BUG_ON(list_empty(&o->vma_list)); + return __i915_gem_obj_to_vma(o)->node.size; } static inline void i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, enum i915_cache_level color) { - o->gtt_space.color = color; + __i915_gem_obj_to_vma(o)->node.color = color; } /** @@ -1691,6 +1718,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); +struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +void i915_gem_vma_destroy(struct i915_vma *vma); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8830856bf3f9..bd8c0086d0c1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2587,6 +2587,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + struct i915_vma *vma; int ret; if (!i915_gem_obj_ggtt_bound(obj)) @@ -2624,11 +2625,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); list_del(&obj->mm_list); - list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); /* Avoid an unnecessary call to unbind on rebind. */ obj->map_and_fenceable = true; - drm_mm_remove_node(&obj->gtt_space); + vma = __i915_gem_obj_to_vma(obj); + list_del(&vma->vma_link); + drm_mm_remove_node(&vma->node); + i915_gem_vma_destroy(vma); + + /* Since the unbound list is global, only move to that list if + * no more VMAs exist. 
+ * NB: Until we have real VMAs there will only ever be one */ + WARN_ON(!list_empty(&obj->vma_list)); + if (list_empty(&obj->vma_list)) + list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); return 0; } @@ -3079,8 +3089,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, bool mappable, fenceable; size_t gtt_max = map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.base.total; + struct i915_vma *vma; int ret; + if (WARN_ON(!list_empty(&obj->vma_list))) + return -EBUSY; + fence_size = i915_gem_get_gtt_size(dev, obj->base.size, obj->tiling_mode); @@ -3119,9 +3133,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); + vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); + if (vma == NULL) { + i915_gem_object_unpin_pages(obj); + return -ENOMEM; + } + search_free: ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, - &obj->gtt_space, + &vma->node, size, alignment, obj->cache_level, 0, gtt_max); if (ret) { @@ -3132,25 +3152,21 @@ search_free: if (ret == 0) goto search_free; - i915_gem_object_unpin_pages(obj); - return ret; + goto err_out; } - if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space, + if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, obj->cache_level))) { - i915_gem_object_unpin_pages(obj); - drm_mm_remove_node(&obj->gtt_space); - return -EINVAL; + ret = -EINVAL; + goto err_out; } ret = i915_gem_gtt_prepare_object(obj); - if (ret) { - i915_gem_object_unpin_pages(obj); - drm_mm_remove_node(&obj->gtt_space); - return ret; - } + if (ret) + goto err_out; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &vm->inactive_list); + list_add(&vma->vma_link, &obj->vma_list); fenceable = i915_gem_obj_ggtt_size(obj) == fence_size && @@ -3164,6 +3180,12 @@ search_free: trace_i915_gem_object_bind(obj, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; + +err_out: + i915_gem_vma_destroy(vma); + i915_gem_object_unpin_pages(obj); + drm_mm_remove_node(&vma->node); + return ret; } void @@ -3309,6 +3331,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_vma *vma = __i915_gem_obj_to_vma(obj); int ret; if (obj->cache_level == cache_level) @@ -3319,7 +3342,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) { + if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { ret = i915_gem_object_unbind(obj); if (ret) return ret; @@ -3864,6 +3887,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); + INIT_LIST_HEAD(&obj->vma_list); obj->ops = ops; @@ -3984,6 +4008,26 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) i915_gem_object_free(obj); } +struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&vma->vma_link); + vma->vm = vm; + vma->obj = obj; + + return vma; +} + +void i915_gem_vma_destroy(struct i915_vma *vma) +{ + WARN_ON(vma->node.allocated); + kfree(vma); +} + int i915_gem_idle(struct drm_device *dev) { diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 
43b82350d8dc..df61f338dea1 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -34,11 +34,13 @@ static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { + struct i915_vma *vma = __i915_gem_obj_to_vma(obj); + if (obj->pin_count) return false; list_add(&obj->exec_list, unwind); - return drm_mm_scan_add_block(&obj->gtt_space); + return drm_mm_scan_add_block(&vma->node); } int @@ -49,6 +51,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, drm_i915_private_t *dev_priv = dev->dev_private; struct i915_address_space *vm = &dev_priv->gtt.base; struct list_head eviction_list, unwind_list; + struct i915_vma *vma; struct drm_i915_gem_object *obj; int ret = 0; @@ -106,8 +109,8 @@ none: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - - ret = drm_mm_scan_remove_block(&obj->gtt_space); + vma = __i915_gem_obj_to_vma(obj); + ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); list_del_init(&obj->exec_list); @@ -127,7 +130,8 @@ found: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - if (drm_mm_scan_remove_block(&obj->gtt_space)) { + vma = __i915_gem_obj_to_vma(obj); + if (drm_mm_scan_remove_block(&vma->node)) { list_move(&obj->exec_list, &eviction_list); drm_gem_object_reference(&obj->base); continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 999ecfecb32e..3b639a94dddf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -662,16 +662,17 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + struct i915_vma *vma = __i915_gem_obj_to_vma(obj); int ret; DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", i915_gem_obj_ggtt_offset(obj), obj->base.size); WARN_ON(i915_gem_obj_ggtt_bound(obj)); - ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, - &obj->gtt_space); + ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); if (ret) DRM_DEBUG_KMS("Reservation failed\n"); obj->has_global_gtt_mapping = 1; + list_add(&vma->vma_link, &obj->vma_list); } dev_priv->gtt.base.start = start; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index cb527e7930ad..fc894f7d32e1 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -351,6 +351,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; + struct i915_vma *vma; int ret; if (dev_priv->mm.stolen_base == 0) @@ -390,18 +391,24 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; + vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); + if (!vma) { + ret = -ENOMEM; + goto err_out; + } + /* To simplify the initialisation sequence between KMS and GTT, * we allow construction of the stolen object prior to * setting up the GTT space. The actual reservation will occur * later. 
*/ - obj->gtt_space.start = gtt_offset; - obj->gtt_space.size = size; + vma->node.start = gtt_offset; + vma->node.size = size; if (drm_mm_initialized(&dev_priv->gtt.base.mm)) { - ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, - &obj->gtt_space); + ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); + i915_gem_vma_destroy(vma); goto err_out; } }
-- cgit v1.2.3
From 52604b1ffabac61eb07cce711f18e18ac74fbeae Mon Sep 17 00:00:00 2001 From: Shobhit Kumar Date: Thu, 11 Jul 2013 18:44:55 -0300 Subject: drm: Added SDP and VSC structures for handling PSR for eDP
SDP header and SDP VSC header as per eDP 1.3 spec, section 3.5, chapter "PSR Secondary Data Package Support".
v2: Modified and corrected the structures to be more in line with kernel coding guidelines and rebased the code on Paulo's DP patchset
v3: removing unnecessary indentation at DP_RECEIVER_CAP_SIZE
v4: moving them to include/drm/drm_dp_helper.h and also already including EDP_PSR_RECEIVER_CAP_SIZE to add everything needed for PSR at once at drm_dp_helper.h
v5: Fix SDP VSC header and indentation (by Paulo Zanoni) and remove i915 from title (Daniel Vetter)
v6: Fix spec version and move comments from code to commit message since numbers might change in the future (by Paulo Zanoni).
CC: Paulo Zanoni Reviewed-by: Paulo Zanoni Signed-off-by: Sateesh Kavuri Signed-off-by: Shobhit Kumar Signed-off-by: Rodrigo Vivi Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- include/drm/drm_dp_helper.h | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index e8e1417af3d9..ae8dbfb1207c 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], int lane); -#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_RECEIVER_CAP_SIZE 0xf +#define EDP_PSR_RECEIVER_CAP_SIZE 2 + void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); +struct edp_sdp_header { + u8 HB0; /* Secondary Data Packet ID */ + u8 HB1; /* Secondary Data Packet Type */ + u8 HB2; /* 7:5 reserved, 4:0 revision number */ + u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */ +} __packed; + +#define EDP_SDP_HEADER_REVISION_MASK 0x1F +#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F + +struct edp_vsc_psr { + struct edp_sdp_header sdp_header; + u8 DB0; /* Stereo Interface */ + u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */ + u8 DB2; /* CRC value bits 7:0 of the R or Cr component */ + u8 DB3; /* CRC value bits 15:8 of the R or Cr component */ + u8 DB4; /* CRC value bits 7:0 of the G or Y component */ + u8 DB5; /* CRC value bits 15:8 of the G or Y component */ + u8 DB6; /* CRC value bits 7:0 of the B or Cb component */ + u8 DB7; /* CRC value bits 15:8 of the B or Cb component */ + u8 DB8_31[24]; /* Reserved */ +} __packed; + +#define EDP_VSC_PSR_STATE_ACTIVE (1<<0) +#define EDP_VSC_PSR_UPDATE_RFB (1<<1) +#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) + static inline int drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
-- cgit v1.2.3
From 2293bb5c0383f522ac659946ccfadb0e6d2f03c5 Mon Sep 17 00:00:00 2001 From: Shobhit Kumar Date: Thu, 11 Jul 2013
18:44:56 -0300 Subject: drm/i915: Read the EDP DPCD and PSR Capability v2: reuse of just created is_edp_psr and put it at right place. v3: move is_edp_psr above intel_edp_disable v4: remove parentheses. Noticed by Paulo. Reviewed-by: Paulo Zanoni Reviewed-by: Jani Nikula Signed-off-by: Shobhit Kumar Signed-off-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 13 +++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 11eb697dec01..fe206a4a0f9e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1364,6 +1364,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } +static bool is_edp_psr(struct intel_dp *intel_dp) +{ + return is_edp(intel_dp) && + intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; +} + static void intel_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -2277,6 +2283,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] == 0) return false; /* DPCD not present */ + /* Check if the panel supports PSR */ + memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); + intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, + intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + if (is_edp_psr(intel_dp)) + DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5dfc1a0f2351..d25726d5307f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -487,6 +487,7 @@ struct intel_dp { uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; -- cgit v1.2.3 From b84a1cf8950ed075c4ab2630514d4caaae504176 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:44:57 -0300 Subject: drm/i915: split aux_clock_divider logic in a separated function for reuse. Prep patch for reuse aux_clock_divider with EDP_PSR_AUX_CTL setup. 
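To make the shape of this prep refactor easier to see at a glance, here is a minimal, self-contained sketch of the pattern it applies: the divider selection is pulled out of the AUX transfer path into one helper that a later PSR setup path can call as well. The types and return values below (fake_dp, the divider numbers) are simplified stand-ins for illustration only, not the driver's actual code.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the DP port state the real helper inspects. */
    struct fake_dp {
            int is_vlv;     /* pretend "is this a Valleyview device?" */
            int is_port_a;  /* pretend "is this the eDP port?" */
    };

    /* One place that knows how to pick the AUX clock divider. */
    static uint32_t get_aux_clock_divider(const struct fake_dp *dp)
    {
            if (dp->is_vlv)
                    return 100;
            if (dp->is_port_a)
                    return 200;     /* illustrative value, not the real table */
            return 63;              /* illustrative fallback */
    }

    /* Caller 1: the existing AUX transfer path. */
    static void aux_transfer(const struct fake_dp *dp)
    {
            printf("AUX transfer, divider %u\n",
                   (unsigned)get_aux_clock_divider(dp));
    }

    /* Caller 2: a later PSR setup path reuses the same helper. */
    static void psr_aux_setup(const struct fake_dp *dp)
    {
            printf("PSR AUX control setup, divider %u\n",
                   (unsigned)get_aux_clock_divider(dp));
    }

    int main(void)
    {
            struct fake_dp dp = { .is_vlv = 0, .is_port_a = 1 };

            aux_transfer(&dp);
            psr_aux_setup(&dp);
            return 0;
    }

The design point is simply that the divider decision lives in one place, so the AUX channel code and the PSR AUX control setup cannot drift apart.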
Reviewed-by: Paulo Zanoni Signed-off-by: Rodrigo Vivi Reviewed-by: Shobhit Kumar Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 58 +++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fe206a4a0f9e..1ef83dc26de0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -271,29 +271,12 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) return status; } -static int -intel_dp_aux_ch(struct intel_dp *intel_dp, - uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size) +static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; - uint32_t ch_data = ch_ctl + 4; - int i, ret, recv_bytes; - uint32_t status; - uint32_t aux_clock_divider; - int try, precharge; - bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); - /* dp aux is extremely sensitive to irq latency, hence request the - * lowest possible wakeup latency and so prevent the cpu from going into - * deep sleep states. - */ - pm_qos_update_request(&dev_priv->pm_qos, 0); - - intel_dp_check_edp(intel_dp); /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that @@ -302,23 +285,48 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * clock divider. */ if (IS_VALLEYVIEW(dev)) { - aux_clock_divider = 100; + return 100; } else if (intel_dig_port->port == PORT_A) { if (HAS_DDI(dev)) - aux_clock_divider = DIV_ROUND_CLOSEST( + return DIV_ROUND_CLOSEST( intel_ddi_get_cdclk_freq(dev_priv), 2000); else if (IS_GEN6(dev) || IS_GEN7(dev)) - aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ + return 200; /* SNB & IVB eDP input clock at 400Mhz */ else - aux_clock_divider = 225; /* eDP input clock at 450Mhz */ + return 225; /* eDP input clock at 450Mhz */ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { /* Workaround for non-ULT HSW */ - aux_clock_divider = 74; + return 74; } else if (HAS_PCH_SPLIT(dev)) { - aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); } else { - aux_clock_divider = intel_hrawclk(dev) / 2; + return intel_hrawclk(dev) / 2; } +} + +static int +intel_dp_aux_ch(struct intel_dp *intel_dp, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; + uint32_t ch_data = ch_ctl + 4; + int i, ret, recv_bytes; + uint32_t status; + uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp); + int try, precharge; + bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); + + /* dp aux is extremely sensitive to irq latency, hence request the + * lowest possible wakeup latency and so prevent the cpu from going into + * deep sleep states. 
+ */ + pm_qos_update_request(&dev_priv->pm_qos, 0); + + intel_dp_check_edp(intel_dp); if (IS_GEN6(dev)) precharge = 3; -- cgit v1.2.3 From 2b28bb1b6440fadececc4cf8f29c55d510c6db09 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:44:58 -0300 Subject: drm/i915: Enable/Disable PSR Adding Enable and Disable PSR functionalities. This includes setting the PSR configuration over AUX, sending SDP VSC DIP over the eDP PIPE config, enabling PSR in the sink via DPCD register and finally enabling PSR on the host. This patch is based on initial PSR code by Sateesh Kavuri and Kumar Shobhit but in a different implementation. v2: * moved functions around and changed its names. * removed VSC DIP unset from disable. * remove FBC wa. * don't mask LSPS anymore. * incorporate new crtc usage after a rebase. v3: Make a clear separation between Sink (Panel) and Source (HW) enabling. v4: Fix identation and other style issues raised by checkpatch (by Paulo). v5: Changes according to Paulo's review: static on write_vsc; avoid using dp_to_dev when already calling dp_to_dig_port; remove unecessary TP default time setting; remove unecessary interrupts disabling; remove unecessary wait_for_vblank when disabling psr; v6: remove unecessary wait_for_vblank when writing vsc; v7: adding setup once function to avoid unnecessarily write to vsc and set debug_ctl every time we enable or disable psr. Cc: Paulo Zanoni Credits-by: Sateesh Kavuri Credits-by: Shobhit Kumar Signed-off-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Reviewed-by: Shobhit Kumar [danvet: Apply Paulo's suggestion for unconditionally clearing the control register when writing the DIP.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 42 +++++++++++ drivers/gpu/drm/i915/intel_dp.c | 149 +++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 4 ++ 3 files changed, 195 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5e58a44c5fe3..56cb79d4a980 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1779,6 +1779,47 @@ #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) +/* HSW eDP PSR registers */ +#define EDP_PSR_CTL 0x64800 +#define EDP_PSR_ENABLE (1<<31) +#define EDP_PSR_LINK_DISABLE (0<<27) +#define EDP_PSR_LINK_STANDBY (1<<27) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) +#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 +#define EDP_PSR_SKIP_AUX_EXIT (1<<12) +#define EDP_PSR_TP1_TP2_SEL (0<<11) +#define EDP_PSR_TP1_TP3_SEL (1<<11) +#define EDP_PSR_TP2_TP3_TIME_500us (0<<8) +#define EDP_PSR_TP2_TP3_TIME_100us (1<<8) +#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) +#define EDP_PSR_TP2_TP3_TIME_0us (3<<8) +#define EDP_PSR_TP1_TIME_500us (0<<4) +#define EDP_PSR_TP1_TIME_100us (1<<4) +#define EDP_PSR_TP1_TIME_2500us (2<<4) +#define EDP_PSR_TP1_TIME_0us (3<<4) +#define EDP_PSR_IDLE_FRAME_SHIFT 0 + +#define EDP_PSR_AUX_CTL 0x64810 +#define EDP_PSR_AUX_DATA1 0x64814 +#define EDP_PSR_DPCD_COMMAND 0x80060000 +#define EDP_PSR_AUX_DATA2 0x64818 +#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) +#define EDP_PSR_AUX_DATA3 0x6481c +#define EDP_PSR_AUX_DATA4 0x64820 +#define EDP_PSR_AUX_DATA5 0x64824 + +#define EDP_PSR_STATUS_CTL 0x64840 +#define 
EDP_PSR_STATUS_STATE_MASK (7<<29) + +#define EDP_PSR_DEBUG_CTL 0x64860 +#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) +#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) +#define EDP_PSR_DEBUG_MASK_HPD (1<<25) + /* VGA port control */ #define ADPA 0x61100 #define PCH_ADPA 0xe1100 @@ -2048,6 +2089,7 @@ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 +#define VIDEO_DIP_VSC_DATA_SIZE 36 #define VIDEO_DIP_CTL 0x61170 /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1ef83dc26de0..bb3593db42e9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1378,6 +1378,153 @@ static bool is_edp_psr(struct intel_dp *intel_dp) intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; } +static bool intel_edp_is_psr_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!IS_HASWELL(dev)) + return false; + + return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; +} + +static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, + struct edp_vsc_psr *vsc_psr) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); + u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); + uint32_t *data = (uint32_t *) vsc_psr; + unsigned int i; + + /* As per BSPec (Pipe Video Data Island Packet), we need to disable + the video DIP being updated before program video DIP data buffer + registers for DIP being updated. 
*/ + I915_WRITE(ctl_reg, 0); + POSTING_READ(ctl_reg); + + for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { + if (i < sizeof(struct edp_vsc_psr)) + I915_WRITE(data_reg + i, *data++); + else + I915_WRITE(data_reg + i, 0); + } + + I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); + POSTING_READ(ctl_reg); +} + +static void intel_edp_psr_setup(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + struct edp_vsc_psr psr_vsc; + + if (intel_dp->psr_setup_done) + return; + + /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + intel_edp_psr_write_vsc(intel_dp, &psr_vsc); + + /* Avoid continuous PSR exit by masking memup and hpd */ + I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD); + + intel_dp->psr_setup_done = true; +} + +static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp); + int precharge = 0x3; + int msg_size = 5; /* Header(4) + Message(1) */ + + /* Enable PSR in sink */ + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) + intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, + DP_PSR_ENABLE & + ~DP_PSR_MAIN_LINK_ACTIVE); + else + intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, + DP_PSR_ENABLE | + DP_PSR_MAIN_LINK_ACTIVE); + + /* Setup AUX registers */ + I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); + I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); + I915_WRITE(EDP_PSR_AUX_CTL, + DP_AUX_CH_CTL_TIME_OUT_400us | + (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); +} + +static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t max_sleep_time = 0x1f; + uint32_t idle_frames = 1; + uint32_t val = 0x0; + + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { + val |= EDP_PSR_LINK_STANDBY; + val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_SKIP_AUX_EXIT; + } else + val |= EDP_PSR_LINK_DISABLE; + + I915_WRITE(EDP_PSR_CTL, val | + EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | + max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | + idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | + EDP_PSR_ENABLE); +} + +void intel_edp_psr_enable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (!is_edp_psr(intel_dp) || intel_edp_is_psr_enabled(dev)) + return; + + /* Setup PSR once */ + intel_edp_psr_setup(intel_dp); + + /* Enable PSR on the panel */ + intel_edp_psr_enable_sink(intel_dp); + + /* Enable PSR on the host */ + intel_edp_psr_enable_source(intel_dp); +} + +void intel_edp_psr_disable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!intel_edp_is_psr_enabled(dev)) + return; + + I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); + + /* Wait till PSR is idle */ + if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & + EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) + DRM_ERROR("Timed out waiting for PSR Idle 
State\n"); +} + static void intel_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -3189,6 +3336,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", error, port_name(port)); + intel_dp->psr_setup_done = false; + if (!intel_edp_init_connector(intel_dp, intel_connector)) { i2c_del_adapter(&intel_dp->adapter); if (is_edp(intel_dp)) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d25726d5307f..ff36a40103eb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -499,6 +499,7 @@ struct intel_dp { int backlight_off_delay; struct delayed_work panel_vdd_work; bool want_panel_vdd; + bool psr_setup_done; struct intel_connector *attached_connector; }; @@ -834,4 +835,7 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable); +extern void intel_edp_psr_enable(struct intel_dp *intel_dp); +extern void intel_edp_psr_disable(struct intel_dp *intel_dp); + #endif /* __INTEL_DRV_H__ */ -- cgit v1.2.3 From e91fd8c6dec2ffa903b4f695fce4b9d7248ed2d5 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:44:59 -0300 Subject: drm/i915: Added debugfs support for PSR Status Adding support for PSR Status, PSR entry counter and performance counters. Heavily based on initial work from Shobhit. v2: Fix PSR Status Link bits by Paulo Zanoni. v3: Prefer seq_puts to seq_printf by Paulo Zanoni. v4: Fix identation by Paulo Zanoni. v5: Return earlier if it isn't Haswell in order to avoid reading non-existing registers - by Paulo Zanoni. CC: Paulo Zanoni Reviewed-by: Paulo Zanoni Credits-by: Shobhit Kumar Signed-off-by: Rodrigo Vivi Reviewed-by: Shobhit Kumar Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 95 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 24 ++++++++++ 2 files changed, 119 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a9246e9c5f9d..65619e6fde86 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1545,6 +1545,100 @@ static int i915_llc(struct seq_file *m, void *data) return 0; } +static int i915_edp_psr_status(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 psrctl, psrstat, psrperf; + + if (!IS_HASWELL(dev)) { + seq_puts(m, "PSR not supported on this platform\n"); + return 0; + } + + psrctl = I915_READ(EDP_PSR_CTL); + seq_printf(m, "PSR Enabled: %s\n", + yesno(psrctl & EDP_PSR_ENABLE)); + + psrstat = I915_READ(EDP_PSR_STATUS_CTL); + + seq_puts(m, "PSR Current State: "); + switch (psrstat & EDP_PSR_STATUS_STATE_MASK) { + case EDP_PSR_STATUS_STATE_IDLE: + seq_puts(m, "Reset state\n"); + break; + case EDP_PSR_STATUS_STATE_SRDONACK: + seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n"); + break; + case EDP_PSR_STATUS_STATE_SRDENT: + seq_puts(m, "SRD entry\n"); + break; + case EDP_PSR_STATUS_STATE_BUFOFF: + seq_puts(m, "Wait for buffer turn off\n"); + break; + case EDP_PSR_STATUS_STATE_BUFON: + seq_puts(m, "Wait for buffer turn on\n"); + break; + case EDP_PSR_STATUS_STATE_AUXACK: + seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n"); + break; + case EDP_PSR_STATUS_STATE_SRDOFFACK: + seq_puts(m, 
"Wait for TG/Stream to acknowledge the SRD VDM exit\n"); + break; + default: + seq_puts(m, "Unknown\n"); + break; + } + + seq_puts(m, "Link Status: "); + switch (psrstat & EDP_PSR_STATUS_LINK_MASK) { + case EDP_PSR_STATUS_LINK_FULL_OFF: + seq_puts(m, "Link is fully off\n"); + break; + case EDP_PSR_STATUS_LINK_FULL_ON: + seq_puts(m, "Link is fully on\n"); + break; + case EDP_PSR_STATUS_LINK_STANDBY: + seq_puts(m, "Link is in standby\n"); + break; + default: + seq_puts(m, "Unknown\n"); + break; + } + + seq_printf(m, "PSR Entry Count: %u\n", + psrstat >> EDP_PSR_STATUS_COUNT_SHIFT & + EDP_PSR_STATUS_COUNT_MASK); + + seq_printf(m, "Max Sleep Timer Counter: %u\n", + psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT & + EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK); + + seq_printf(m, "Had AUX error: %s\n", + yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR)); + + seq_printf(m, "Sending AUX: %s\n", + yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING)); + + seq_printf(m, "Sending Idle: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE)); + + seq_printf(m, "Sending TP2 TP3: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3)); + + seq_printf(m, "Sending TP1: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1)); + + seq_printf(m, "Idle Count: %u\n", + psrstat & EDP_PSR_STATUS_IDLE_MASK); + + psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance Counter: %u\n", psrperf); + + return 0; +} + static int i915_wedged_get(void *data, u64 *val) { @@ -1977,6 +2071,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_dpio", i915_dpio_info, 0}, {"i915_llc", i915_llc, 0}, + {"i915_edp_psr_status", i915_edp_psr_status, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 56cb79d4a980..bb898bfe053d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1814,6 +1814,30 @@ #define EDP_PSR_STATUS_CTL 0x64840 #define EDP_PSR_STATUS_STATE_MASK (7<<29) +#define EDP_PSR_STATUS_STATE_IDLE (0<<29) +#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) +#define EDP_PSR_STATUS_STATE_SRDENT (2<<29) +#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) +#define EDP_PSR_STATUS_STATE_BUFON (4<<29) +#define EDP_PSR_STATUS_STATE_AUXACK (5<<29) +#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) +#define EDP_PSR_STATUS_LINK_MASK (3<<26) +#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) +#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26) +#define EDP_PSR_STATUS_LINK_STANDBY (2<<26) +#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 +#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f +#define EDP_PSR_STATUS_COUNT_SHIFT 16 +#define EDP_PSR_STATUS_COUNT_MASK 0xf +#define EDP_PSR_STATUS_AUX_ERROR (1<<15) +#define EDP_PSR_STATUS_AUX_SENDING (1<<12) +#define EDP_PSR_STATUS_SENDING_IDLE (1<<9) +#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) +#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) +#define EDP_PSR_STATUS_IDLE_MASK 0xf + +#define EDP_PSR_PERF_CNT 0x64844 +#define EDP_PSR_PERF_CNT_MASK 0xffffff #define EDP_PSR_DEBUG_CTL 0x64860 #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) -- cgit v1.2.3 From 3f51e4713fc57ab0fc225c3f0e67578a53c24a11 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:45:00 -0300 Subject: drm/i915: Match all PSR mode entry conditions before enabling it. v2: Prefer seq_puts to seq_printf by Paulo Zanoni. v3: small changes like avoiding calling dp_to_dig_port twice as noticed by Paulo Zanoni. 
v4: Avoiding reading non-existent registers - noticed by Paulo on first psr debugfs patch. v5: Accepting more suggestions from Paulo: * check sw interlace flag instead of i915_read * introduce PSR_S3D_ENABLED to avoid forgeting it whenever added. Cc: Paulo Zanoni Signed-off-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Reviewed-by: Shobhit Kumar [danvet: Fix up debugfs output (spotted by Paulo) and rip out the power well check since we really can't do that in a race-free manner, so it's bogus.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 42 ++++++++++++++++++++--- drivers/gpu/drm/i915/i915_drv.h | 13 +++++++ drivers/gpu/drm/i915/i915_reg.h | 7 ++++ drivers/gpu/drm/i915/intel_dp.c | 67 ++++++++++++++++++++++++++++++++++++- 4 files changed, 123 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 65619e6fde86..973f2727d703 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1550,17 +1550,49 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 psrctl, psrstat, psrperf; + u32 psrstat, psrperf; if (!IS_HASWELL(dev)) { seq_puts(m, "PSR not supported on this platform\n"); + } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) { + seq_puts(m, "PSR enabled\n"); + } else { + seq_puts(m, "PSR disabled: "); + switch (dev_priv->no_psr_reason) { + case PSR_NO_SOURCE: + seq_puts(m, "not supported on this platform"); + break; + case PSR_NO_SINK: + seq_puts(m, "not supported by panel"); + break; + case PSR_CRTC_NOT_ACTIVE: + seq_puts(m, "crtc not active"); + break; + case PSR_PWR_WELL_ENABLED: + seq_puts(m, "power well enabled"); + break; + case PSR_NOT_TILED: + seq_puts(m, "not tiled"); + break; + case PSR_SPRITE_ENABLED: + seq_puts(m, "sprite enabled"); + break; + case PSR_S3D_ENABLED: + seq_puts(m, "stereo 3d enabled"); + break; + case PSR_INTERLACED_ENABLED: + seq_puts(m, "interlaced enabled"); + break; + case PSR_HSW_NOT_DDIA: + seq_puts(m, "HSW ties PSR to DDI A (eDP)"); + break; + default: + seq_puts(m, "unknown reason"); + } + seq_puts(m, "\n"); return 0; } - psrctl = I915_READ(EDP_PSR_CTL); - seq_printf(m, "PSR Enabled: %s\n", - yesno(psrctl & EDP_PSR_ENABLE)); - psrstat = I915_READ(EDP_PSR_STATUS_CTL); seq_puts(m, "PSR Current State: "); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bd1b95ea6a28..21d55f855d16 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -593,6 +593,17 @@ struct i915_fbc { } no_fbc_reason; }; +enum no_psr_reason { + PSR_NO_SOURCE, /* Not supported on platform */ + PSR_NO_SINK, /* Not supported by panel */ + PSR_CRTC_NOT_ACTIVE, + PSR_PWR_WELL_ENABLED, + PSR_NOT_TILED, + PSR_SPRITE_ENABLED, + PSR_S3D_ENABLED, + PSR_INTERLACED_ENABLED, + PSR_HSW_NOT_DDIA, +}; enum intel_pch { PCH_NONE = 0, /* No PCH present */ @@ -1173,6 +1184,8 @@ typedef struct drm_i915_private { /* Haswell power well */ struct i915_power_well power_well; + enum no_psr_reason no_psr_reason; + struct i915_gpu_error gpu_error; struct drm_i915_gem_object *vlv_pctx; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bb898bfe053d..1d710966983e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4150,6 +4150,13 @@ #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) +#define HSW_STEREO_3D_CTL_A 0x70020 +#define S3D_ENABLE (1<<31) +#define HSW_STEREO_3D_CTL_B 0x71020 + +#define HSW_STEREO_3D_CTL(trans) \ + _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) + #define _PCH_TRANS_HTOTAL_B 0xe1000 #define _PCH_TRANS_HBLANK_B 0xe1004 #define _PCH_TRANS_HSYNC_B 0xe1008 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index bb3593db42e9..3ce1b872935e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1492,11 +1492,76 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) EDP_PSR_ENABLE); } +static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dig_port->base.base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; + struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + + if (!IS_HASWELL(dev)) { + DRM_DEBUG_KMS("PSR not supported on this platform\n"); + dev_priv->no_psr_reason = PSR_NO_SOURCE; + return false; + } + + if ((intel_encoder->type != INTEL_OUTPUT_EDP) || + (dig_port->port != PORT_A)) { + DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); + dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA; + return false; + } + + if (!is_edp_psr(intel_dp)) { + DRM_DEBUG_KMS("PSR not supported by this panel\n"); + dev_priv->no_psr_reason = PSR_NO_SINK; + return false; + } + + if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { + DRM_DEBUG_KMS("crtc not active for PSR\n"); + dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; + return false; + } + + if (obj->tiling_mode != I915_TILING_X || + obj->fence_reg == I915_FENCE_REG_NONE) { + DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); + dev_priv->no_psr_reason = PSR_NOT_TILED; + return false; + } + + if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { + DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); + dev_priv->no_psr_reason = PSR_SPRITE_ENABLED; + return false; + } + + if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & + S3D_ENABLE) { + DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); + dev_priv->no_psr_reason = PSR_S3D_ENABLED; + return false; + } + + if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { + DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); + dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED; + return false; + } + + return true; +} + void intel_edp_psr_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - if (!is_edp_psr(intel_dp) || intel_edp_is_psr_enabled(dev)) + if (!intel_edp_psr_match_conditions(intel_dp) || + intel_edp_is_psr_enabled(dev)) return; /* Setup PSR once */ -- cgit v1.2.3 From 105b7c11f036f734988990541674a93e54cf4ec1 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:45:02 -0300 Subject: drm/intel: add enable_psr module option and disable psr by default v2: prefer seq_puts to seq_printf detected by Paulo Zanoni. v3: PSR is disabled by default. 
Without userspace ready it will cause regression for kde and xdm users Signed-off-by: Rodrigo Vivi Reviewed-by: Shobhit Kumar Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_dp.c | 6 ++++++ 4 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 973f2727d703..9d871c7eeaee 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1565,6 +1565,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) case PSR_NO_SINK: seq_puts(m, "not supported by panel"); break; + case PSR_MODULE_PARAM: + seq_puts(m, "disabled by flag"); + break; case PSR_CRTC_NOT_ACTIVE: seq_puts(m, "crtc not active"); break; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0485f435eeea..b178a7ca1294 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -118,6 +118,10 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); +int i915_enable_psr __read_mostly = 0; +module_param_named(enable_psr, i915_enable_psr, int, 0600); +MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); + unsigned int i915_preliminary_hw_support __read_mostly = 0; module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); MODULE_PARM_DESC(preliminary_hw_support, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21d55f855d16..36d1c806e092 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -596,6 +596,7 @@ struct i915_fbc { enum no_psr_reason { PSR_NO_SOURCE, /* Not supported on platform */ PSR_NO_SINK, /* Not supported by panel */ + PSR_MODULE_PARAM, PSR_CRTC_NOT_ACTIVE, PSR_PWR_WELL_ENABLED, PSR_NOT_TILED, @@ -1621,6 +1622,7 @@ extern int i915_enable_rc6 __read_mostly; extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; +extern int i915_enable_psr __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3ce1b872935e..6a4cdea76274 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1521,6 +1521,12 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } + if (!i915_enable_psr) { + DRM_DEBUG_KMS("PSR disable by flag\n"); + dev_priv->no_psr_reason = PSR_MODULE_PARAM; + return false; + } + if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { DRM_DEBUG_KMS("crtc not active for PSR\n"); dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; -- cgit v1.2.3 From 3d739d92d9cbce6cdaf101fe78870f97fcbf5349 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:45:01 -0300 Subject: drm/i915: add update function to disable/enable-back PSR Required function to disable PSR when going to console mode. But also can be used whenever PSR mode entry conditions changed. v2: Add it before PSR Hook. Update function not really been called yet. v3: Fix coding style detected by checkpatch by Paulo Zanoni. v4: do_enable must be static as Paulo noticed. 
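Since the update function is not wired up anywhere yet, its intended call site can only be sketched here; the snippet below mirrors the intel_pipe_set_base hunk that lands later in this series and is shown purely for illustration:

        /* Re-evaluate PSR whenever the scanout setup changes, right next to
         * the existing FBC re-check (sketch of the later hook-up patch). */
        intel_update_fbc(dev);
        intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);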
Cc: Paulo Zanoni Signed-off-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Reviewed-by: Shobhit Kumar Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 31 ++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 1 + 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6a4cdea76274..2183b6f57a22 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1562,7 +1562,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return true; } -void intel_edp_psr_enable(struct intel_dp *intel_dp) +static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -1580,6 +1580,15 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp) intel_edp_psr_enable_source(intel_dp); } +void intel_edp_psr_enable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (intel_edp_psr_match_conditions(intel_dp) && + !intel_edp_is_psr_enabled(dev)) + intel_edp_psr_do_enable(intel_dp); +} + void intel_edp_psr_disable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -1596,6 +1605,26 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) DRM_ERROR("Timed out waiting for PSR Idle State\n"); } +void intel_edp_psr_update(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_dp *intel_dp = NULL; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) + if (encoder->type == INTEL_OUTPUT_EDP) { + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!is_edp_psr(intel_dp)) + return; + + if (!intel_edp_psr_match_conditions(intel_dp)) + intel_edp_psr_disable(intel_dp); + else + if (!intel_edp_is_psr_enabled(dev)) + intel_edp_psr_do_enable(intel_dp); + } +} + static void intel_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ff36a40103eb..40e955d00b2b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -837,5 +837,6 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, extern void intel_edp_psr_enable(struct intel_dp *intel_dp); extern void intel_edp_psr_disable(struct intel_dp *intel_dp); +extern void intel_edp_psr_update(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ -- cgit v1.2.3 From 4906557eb37b7fef84fad4304acef6dedf919880 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2013 18:45:05 -0300 Subject: drm/i915: Hook PSR functionality PSR must be enabled after transcoder and port are running. And it is only available for HSW. v2: move enable/disable to intel_ddi v3: The spec suggests PSR should be disabled even before backlight (by pzanoni) v4: also disabling and enabling whenever panel is disabled/enabled. v5: make it last patch to avoid breaking whenever bisecting. So calling for update and force exit came to this patch along with enable/disable calls. v6: Remove unused and unecessary psr_enable/disable calls, as notice by Paulo. CC: Paulo Zanoni Signed-off-by: Rodrigo Vivi [danvet: Drop the psr exit code in the busy ioctl since I didn't merge that part of the infrastructure yet - it needs more thought.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 2 ++ drivers/gpu/drm/i915/intel_display.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 324211ac9c55..421192542c31 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1117,6 +1117,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); + intel_edp_psr_enable(intel_dp); } if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { @@ -1147,6 +1148,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_edp_psr_disable(intel_dp); ironlake_edp_backlight_off(intel_dp); } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3cda57d7b3f0..94cb0ad604d7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2274,6 +2274,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } intel_update_fbc(dev); + intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); intel_crtc_update_sarea_pos(crtc, x, y); -- cgit v1.2.3 From 7984211ee8e1fa03cd4bc9ef3d347f94f8a2c8a8 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 18 Jul 2013 17:44:13 +0300 Subject: drm/i915: restore debug message lost in merge resolution Restore debug message lost in merge commit e1b73cba13. Also clarify it that we are only clamping bpp not overwriting it. Signed-off-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2183b6f57a22..d391ce38ec25 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -713,8 +713,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp_bpp); bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); + } for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); -- cgit v1.2.3 From 0ff066a9e4a29481226a6d46eab6bd9499aeaddb Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 14:19:36 -0300 Subject: drm/i915: remove SDV support from lpt_pch_init_refclk The machines that fall in the "is_sdv" case are some very early pre-production steppings. This patch may break VGA output after suspend/resume on these machines. Even the documentation for the is_sdv cases was removed from BSpec. 
Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 104 ++++++++++++----------------------- 1 file changed, 34 insertions(+), 70 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 94cb0ad604d7..46c4dff92900 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5177,7 +5177,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; bool has_vga = false; - bool is_sdv = false; u32 tmp; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { @@ -5193,10 +5192,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) mutex_lock(&dev_priv->dpio_lock); - /* XXX: Rip out SDV support once Haswell ships for real. */ - if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) - is_sdv = true; - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); tmp &= ~SBI_SSCCTL_DISABLE; tmp |= SBI_SSCCTL_PATHALT; @@ -5208,36 +5203,27 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp &= ~SBI_SSCCTL_PATHALT; intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - if (!is_sdv) { - tmp = I915_READ(SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + tmp = I915_READ(SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - DRM_ERROR("FDI mPHY reset assert timeout\n"); + if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + DRM_ERROR("FDI mPHY reset assert timeout\n"); - tmp = I915_READ(SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + tmp = I915_READ(SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, - 100)) - DRM_ERROR("FDI mPHY reset de-assert timeout\n"); - } + if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + DRM_ERROR("FDI mPHY reset de-assert timeout\n"); tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); tmp |= (0x12 << 24); intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); - if (is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); - tmp |= 0x7FFF; - intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY); - } - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); @@ -5246,24 +5232,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); - if (is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY); - tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); - intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY); - tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); - intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY); - tmp |= (0x3F << 8); - intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY); - tmp |= (0x3F << 8); - intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY); - } - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x206C, tmp, 
SBI_MPHY); @@ -5272,17 +5240,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); - if (!is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); - } + tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); tmp &= ~0xFF; @@ -5304,25 +5270,23 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (0x1C << 16); intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); - if (!is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); - } + tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); -- cgit v1.2.3 From f31f2d55eb77190e66cb13e5dd2beca7a91f8dd0 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 18 Jul 2013 18:51:11 -0300 Subject: drm/i915: extract FDI mPHY functions from lpt_init_pch_refclk Because lpt_init_pch_refclk implements the "Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O", which is very similar to "Sequence to enable CLKOUT_DP" and "Sequence to enable CLKOUT_DP without spread". With the extracted functions we can more easily implement the two missing sequences. v2: Rebase (WaMPhyProgramming:hsw comment). Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 79 ++++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 46c4dff92900..502610530e35 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5167,41 +5167,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) BUG_ON(val != final); } -/* - * Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. 
- * WaMPhyProgramming:hsw - */ -static void lpt_init_pch_refclk(struct drm_device *dev) +static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *encoder; - bool has_vga = false; - u32 tmp; - - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_vga = true; - break; - } - } - - if (!has_vga) - return; - - mutex_lock(&dev_priv->dpio_lock); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_DISABLE; - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - udelay(24); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + uint32_t tmp; tmp = I915_READ(SOUTH_CHICKEN2); tmp |= FDI_MPHY_IOSFSB_RESET_CTL; @@ -5218,6 +5186,12 @@ static void lpt_init_pch_refclk(struct drm_device *dev) if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) DRM_ERROR("FDI mPHY reset de-assert timeout\n"); +} + +/* WaMPhyProgramming:hsw */ +static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) +{ + uint32_t tmp; tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); @@ -5287,6 +5261,43 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp &= ~(0xF << 28); tmp |= (4 << 28); intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); +} + +/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ +static void lpt_init_pch_refclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + bool has_vga = false; + u32 tmp; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_vga = true; + break; + } + } + + if (!has_vga) + return; + + mutex_lock(&dev_priv->dpio_lock); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + udelay(24); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + lpt_reset_fdi_mphy(dev_priv); + lpt_program_fdi_mphy(dev_priv); /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); -- cgit v1.2.3 From bf8fa3d383aa9eb0003419e40ad0f3667c810154 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 14:19:38 -0300 Subject: drm/i915: extract lpt_enable_clkout_dp from lpt_init_pch_refclk The next step is to modify lpt_enable_clkout_dp to enable support for "Sequence to enable CLKOUT_DP" and "Sequence to enable CLKOUT_DP without spread". 
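One plausible shape for that follow-up is to hang both sequences off the extracted helper via flags; the parameter names and the exact split below are assumptions made for illustration, not the interface this patch introduces:

        /* Hypothetical sketch: let callers pick the spread-spectrum variant
         * and whether the FDI mPHY needs to be programmed. */
        static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                                         bool with_fdi)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;
                uint32_t tmp;

                mutex_lock(&dev_priv->dpio_lock);

                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_DISABLE;
                tmp |= SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                udelay(24);

                if (with_spread) {
                        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                        tmp &= ~SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                        /* assumption: FDI usage implies the spread path */
                        if (with_fdi) {
                                lpt_reset_fdi_mphy(dev_priv);
                                lpt_program_fdi_mphy(dev_priv);
                        }
                }

                mutex_unlock(&dev_priv->dpio_lock);
        }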
Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 38 +++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 502610530e35..a8fc924e7788 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5264,24 +5264,10 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) } /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ -static void lpt_init_pch_refclk(struct drm_device *dev) +static void lpt_enable_clkout_dp(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *encoder; - bool has_vga = false; - u32 tmp; - - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_vga = true; - break; - } - } - - if (!has_vga) - return; + uint32_t tmp; mutex_lock(&dev_priv->dpio_lock); @@ -5307,6 +5293,26 @@ static void lpt_init_pch_refclk(struct drm_device *dev) mutex_unlock(&dev_priv->dpio_lock); } +static void lpt_init_pch_refclk(struct drm_device *dev) +{ + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + bool has_vga = false; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_vga = true; + break; + } + } + + if (!has_vga) + return; + + lpt_enable_clkout_dp(dev); +} + /* * Initialize reference clocks when the driver loads */ -- cgit v1.2.3 From db473b36d4a2eb02c65aefca11578698b3699fe0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 19 Jul 2013 08:45:46 +0300 Subject: drm/i915: checking for NULL instead of IS_ERR() i915_gem_vma_create() returns and ERR_PTR() or a valid pointer, it never returns NULL. Signed-off-by: Dan Carpenter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_stolen.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bd8c0086d0c1..56642d8eb937 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3134,9 +3134,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); - if (vma == NULL) { + if (IS_ERR(vma)) { i915_gem_object_unpin_pages(obj); - return -ENOMEM; + return PTR_ERR(vma); } search_free: diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index fc894f7d32e1..9a1896c86dcd 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -392,8 +392,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, return obj; vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); - if (!vma) { - ret = -ENOMEM; + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto err_out; } -- cgit v1.2.3 From 6286ef9b56bfc5d4f3f06ef5488e41da4480dc85 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 19 Jul 2013 08:46:27 +0300 Subject: drm/i915: use after free on error path i915_gem_vma_destroy() frees its argument so we have to move the drm_mm_remove_node() call up a few lines. 
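For reference, the convention at play, reduced to a minimal sketch (the allocator below is a stand-in used only to demonstrate the pattern, not a driver function):

        #include <linux/err.h>
        #include <linux/slab.h>

        /* Like i915_gem_vma_create(): return a valid pointer or an ERR_PTR()
         * encoded errno, never NULL. */
        static void *example_create(void)
        {
                void *obj = kzalloc(64, GFP_KERNEL);

                if (obj == NULL)
                        return ERR_PTR(-ENOMEM);
                return obj;
        }

        /* Callers therefore must test with IS_ERR(); a NULL check never fires. */
        static int example_caller(void)
        {
                void *obj = example_create();

                if (IS_ERR(obj))
                        return PTR_ERR(obj);
                kfree(obj);
                return 0;
        }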
Signed-off-by: Dan Carpenter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 56642d8eb937..ce043f14eab9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3182,9 +3182,9 @@ search_free: return 0; err_out: + drm_mm_remove_node(&vma->node); i915_gem_vma_destroy(vma); i915_gem_object_unpin_pages(obj); - drm_mm_remove_node(&vma->node); return ret; } -- cgit v1.2.3 From 0b74b508f78cea96d0d1b47e72cc0ec7959cdc68 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Fri, 19 Jul 2013 13:51:24 +0800 Subject: drm/i915: add prefault_disable module option prefault is stll enabled by default which prevent most of pwrite/pread/reloc from running slow path, in order to verify these slow pathes, prefault need to be disabled. Signed-off-by: Xiong Zhang [danvet: Make checkpatch happy and bikeshed the module option help text a bit.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 12 +++++++----- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 6 ++++-- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b178a7ca1294..c34086ad8181 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -141,6 +141,11 @@ module_param_named(fastboot, i915_fastboot, bool, 0600); MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " "(default: false)"); +bool i915_prefault_disable __read_mostly; +module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); +MODULE_PARM_DESC(prefault_disable, + "Disable page prefaulting for pread/pwrite/reloc (default:false). 
For developers only."); + static struct drm_driver driver; extern int intel_agp_enabled; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 36d1c806e092..fc32d2034f38 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1627,6 +1627,7 @@ extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; extern bool i915_fastboot __read_mostly; +extern bool i915_prefault_disable __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ce043f14eab9..acc99d458143 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -465,7 +465,7 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); - if (!prefaulted) { + if (likely(!i915_prefault_disable) && !prefaulted) { ret = fault_in_multipages_writeable(user_data, remain); /* Userspace is tricking us, but we've already clobbered * its pages with the prefault and promised to write the @@ -860,10 +860,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), - args->size); - if (ret) - return -EFAULT; + if (likely(!i915_prefault_disable)) { + ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + args->size); + if (ret) + return -EFAULT; + } ret = i915_mutex_lock_interruptible(dev); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1b58694d7be7..1734825bef34 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -759,8 +759,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (!access_ok(VERIFY_WRITE, ptr, length)) return -EFAULT; - if (fault_in_multipages_readable(ptr, length)) - return -EFAULT; + if (likely(!i915_prefault_disable)) { + if (fault_in_multipages_readable(ptr, length)) + return -EFAULT; + } } return 0; -- cgit v1.2.3 From 31694658fa5bc604a2df2cbaf72d4cfc52e9db48 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 16:35:09 -0300 Subject: drm/i915: kill ivybridge_irq_preinstall After Daniel's latest changes it's now equal to ironlake_irq_preinstall. 
Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 99106990a326..d537b6151495 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2093,25 +2093,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev) ibx_irq_preinstall(dev); } -static void ivybridge_irq_preinstall(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - - atomic_set(&dev_priv->irq_received, 0); - - I915_WRITE(HWSTAM, 0xeffe); - - /* XXX hotplug from PCH */ - - I915_WRITE(DEIMR, 0xffffffff); - I915_WRITE(DEIER, 0x0); - POSTING_READ(DEIER); - - gen5_gt_irq_preinstall(dev); - - ibx_irq_preinstall(dev); -} - static void valleyview_irq_preinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -3120,7 +3101,7 @@ void intel_irq_init(struct drm_device *dev) } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { /* Share uninstall handlers with ILK/SNB */ dev->driver->irq_handler = ivybridge_irq_handler; - dev->driver->irq_preinstall = ivybridge_irq_preinstall; + dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ivybridge_enable_vblank; -- cgit v1.2.3 From c008bc6eda951dd091115374930de97de48a8b67 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 16:35:10 -0300 Subject: drm/i915: extract ilk_display_irq_handler It's the code that deals with de_iir. Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 104 +++++++++++++++++++++------------------- 1 file changed, 56 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d537b6151495..cdf0e4990300 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1197,6 +1197,60 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) cpt_serr_int_handler(dev); } +static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (de_iir & DE_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE) + intel_opregion_asle_intr(dev); + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + + if (de_iir & DE_POISON) + DRM_ERROR("Poison interrupt\n"); + + if (de_iir & DE_PIPEA_FIFO_UNDERRUN) + if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) + DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); + + if (de_iir & DE_PIPEB_FIFO_UNDERRUN) + if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) + DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); + + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + u32 pch_iir = I915_READ(SDEIIR); + + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, 
pch_iir); + } + + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev); +} + static irqreturn_t ivybridge_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; @@ -1355,54 +1409,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) else snb_gt_irq_handler(dev, dev_priv, gt_iir); - if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); - - if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev); - - if (de_iir & DE_PIPEA_VBLANK) - drm_handle_vblank(dev, 0); - - if (de_iir & DE_PIPEB_VBLANK) - drm_handle_vblank(dev, 1); - - if (de_iir & DE_POISON) - DRM_ERROR("Poison interrupt\n"); - - if (de_iir & DE_PIPEA_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) - DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); - - if (de_iir & DE_PIPEB_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) - DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); - - if (de_iir & DE_PLANEA_FLIP_DONE) { - intel_prepare_page_flip(dev, 0); - intel_finish_page_flip_plane(dev, 0); - } - - if (de_iir & DE_PLANEB_FLIP_DONE) { - intel_prepare_page_flip(dev, 1); - intel_finish_page_flip_plane(dev, 1); - } - - /* check event from PCH */ - if (de_iir & DE_PCH_EVENT) { - u32 pch_iir = I915_READ(SDEIIR); - - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); - else - ibx_irq_handler(dev, pch_iir); - - /* should clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - } - - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev); + if (de_iir) + ilk_display_irq_handler(dev, de_iir); if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) gen6_rps_irq_handler(dev_priv, pm_iir); -- cgit v1.2.3 From 9719fb9852e4301d5b8d74feec141d3c3e60fae0 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 16:35:11 -0300 Subject: drm/i915: extract ivb_display_irq_handler Just like we did with ilk_display_irq_handler. 
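To make the direction of these extractions explicit, the dispatch they enable ends up looking roughly like the sketch below (it matches the hunk added later in "add ILK/SNB support to ivybridge_irq_handler" and is shown here only as a preview):

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (INTEL_INFO(dev)->gen >= 7)
                        ivb_display_irq_handler(dev, de_iir);
                else
                        ilk_display_irq_handler(dev, de_iir);
                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
        }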
Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 63 +++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cdf0e4990300..57cd702ab06c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1251,13 +1251,46 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) ironlake_rps_change_irq_handler(dev); } +static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (de_iir & DE_ERR_INT_IVB) + ivb_err_int_handler(dev); + + if (de_iir & DE_AUX_CHANNEL_A_IVB) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE_IVB) + intel_opregion_asle_intr(dev); + + for (i = 0; i < 3; i++) { + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + drm_handle_vblank(dev, i); + if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + intel_prepare_page_flip(dev, i); + intel_finish_page_flip_plane(dev, i); + } + } + + /* check event from PCH */ + if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } +} + static irqreturn_t ivybridge_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; irqreturn_t ret = IRQ_NONE; - int i; atomic_inc(&dev_priv->irq_received); @@ -1302,33 +1335,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) de_iir = I915_READ(DEIIR); if (de_iir) { - if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev); - - if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); - - if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev); - - for (i = 0; i < 3; i++) { - if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) - drm_handle_vblank(dev, i); - if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { - intel_prepare_page_flip(dev, i); - intel_finish_page_flip_plane(dev, i); - } - } - - /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { - u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - - /* clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - } + ivb_display_irq_handler(dev, de_iir); I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; -- cgit v1.2.3 From 221ab43e8abe1e395d4bdd475ee3d4c2548f04ca Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 19:52:36 -0300 Subject: drm/i915: don't read or write GEN6_PMIIR on Gen 5 The register doesn't exist on Gen 5. 
v2: Simplify checks since pm_iir is always 0 on Gen 5 (Chris) Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 57cd702ab06c..077b4791476a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1384,7 +1384,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; + u32 de_iir, gt_iir, de_ier, pm_iir = 0, sde_ier; atomic_inc(&dev_priv->irq_received); @@ -1404,9 +1404,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); - pm_iir = I915_READ(GEN6_PMIIR); + if (IS_GEN6(dev)) + pm_iir = I915_READ(GEN6_PMIIR); - if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) + if (de_iir == 0 && gt_iir == 0 && pm_iir == 0) goto done; ret = IRQ_HANDLED; @@ -1419,12 +1420,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (de_iir) ilk_display_irq_handler(dev, de_iir); - if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) + if (pm_iir & GEN6_PM_RPS_EVENTS) gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(DEIIR, de_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); + if (pm_iir) + I915_WRITE(GEN6_PMIIR, pm_iir); done: I915_WRITE(DEIER, de_ier); -- cgit v1.2.3 From 27b9188e14f5f1033ed36ea84035898fe21e4f46 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 19:54:41 -0300 Subject: drm/i915: reorganize ironlake_irq_handler The ironlake_irq_handler and ivybridge_irq_handler functions do basically the same thing, but they have different implementation styles. With this patch we reorganize ironlake_irq_handler in a way that makes it look very similar to ivybridge_irq_handler. One of the advantages of this new function style is that we don't write 0 to the IIR registers anymore. 
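Concretely: the old handler acked every IIR register at the end, even ones that read back as zero, while the reorganized one only writes back what it actually saw set. A minimal before/after sketch of the GT path (illustrative only, the full hunk follows below):

        /* before: unconditional ack, may write 0 to GTIIR */
        gt_iir = I915_READ(GTIIR);
        snb_gt_irq_handler(dev, dev_priv, gt_iir);
        I915_WRITE(GTIIR, gt_iir);

        /* after: handle and ack only when something was pending */
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
        }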
v2: - Rebase due to changes on previous patches - Move pm_iir to a tighter scope (Chris) Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 46 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 077b4791476a..3ef8f23d4fa1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1384,7 +1384,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pm_iir = 0, sde_ier; + u32 de_iir, gt_iir, de_ier, sde_ier; atomic_inc(&dev_priv->irq_received); @@ -1402,33 +1402,33 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(SDEIER, 0); POSTING_READ(SDEIER); - de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); - if (IS_GEN6(dev)) - pm_iir = I915_READ(GEN6_PMIIR); - - if (de_iir == 0 && gt_iir == 0 && pm_iir == 0) - goto done; - - ret = IRQ_HANDLED; - - if (IS_GEN5(dev)) - ilk_gt_irq_handler(dev, dev_priv, gt_iir); - else - snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (gt_iir) { + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); + I915_WRITE(GTIIR, gt_iir); + ret = IRQ_HANDLED; + } - if (de_iir) + de_iir = I915_READ(DEIIR); + if (de_iir) { ilk_display_irq_handler(dev, de_iir); + I915_WRITE(DEIIR, de_iir); + ret = IRQ_HANDLED; + } - if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_rps_irq_handler(dev_priv, pm_iir); - - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(DEIIR, de_iir); - if (pm_iir) - I915_WRITE(GEN6_PMIIR, pm_iir); + if (IS_GEN6(dev)) { + u32 pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + if (pm_iir & GEN6_PM_RPS_EVENTS) + gen6_rps_irq_handler(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = IRQ_HANDLED; + } + } -done: I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); I915_WRITE(SDEIER, sde_ier); -- cgit v1.2.3 From 23a78516081c49398b6bf08d7a40e954048426bf Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 16:35:14 -0300 Subject: drm/i915: POSTING_READ(DEIER) on ivybridge_irq_handler We have this POSTING_READ inside ironlake_irq_handler. I suppose we also want it on IVB because we want to stop the IRQ handler as soon as possible at this point. Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3ef8f23d4fa1..597a3d5ae7e1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1305,6 +1305,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + POSTING_READ(DEIER); /* Disable south interrupts. We'll only write to SDEIIR once, so further * interrupts will will be stored on its back queue, and then we'll be -- cgit v1.2.3 From f1af8fc10cdb75da7f07f765e9af86dec064f2a8 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 19:56:30 -0300 Subject: drm/i915: add ILK/SNB support to ivybridge_irq_handler And then rename it to ironlake_irq_handler. Also move ilk_gt_irq_handler up to avoid forward declarations. 
In the previous patches I did small modifications to both ironlake_irq_handler an ivybridge_irq_handler so they became very similar functions. Now it should be very easy to verify that all we need to add ILK/SNB support is to call ilk_gt_irq_handler, call ilk_display_irq_handler and avoid reading pm_iir on gen 5. v2: - Rebase due to changes on the previous patches - Move pm_iir to a tighter scope (Chris) - Change some Gen checks for readability Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 115 +++++++++++----------------------------- 1 file changed, 32 insertions(+), 83 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 597a3d5ae7e1..7c201f7906eb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -844,6 +844,17 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev) queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); } +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & + (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & ILK_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); +} + static void snb_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) @@ -1285,11 +1296,11 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) } } -static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; + u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; atomic_inc(&dev_priv->irq_received); @@ -1329,27 +1340,34 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) gt_iir = I915_READ(GTIIR); if (gt_iir) { - snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { - ivb_display_irq_handler(dev, de_iir); - + if (INTEL_INFO(dev)->gen >= 7) + ivb_display_irq_handler(dev, de_iir); + else + ilk_display_irq_handler(dev, de_iir); I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; } - pm_iir = I915_READ(GEN6_PMIIR); - if (pm_iir) { - if (IS_HASWELL(dev)) - hsw_pm_irq_handler(dev_priv, pm_iir); - else if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_rps_irq_handler(dev_priv, pm_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - ret = IRQ_HANDLED; + if (INTEL_INFO(dev)->gen >= 6) { + u32 pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + if (IS_HASWELL(dev)) + hsw_pm_irq_handler(dev_priv, pm_iir); + else if (pm_iir & GEN6_PM_RPS_EVENTS) + gen6_rps_irq_handler(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = IRQ_HANDLED; + } } if (IS_HASWELL(dev)) { @@ -1369,75 +1387,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) return ret; } -static void ilk_gt_irq_handler(struct drm_device *dev, - struct drm_i915_private *dev_priv, - u32 gt_iir) -{ - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); -} - -static irqreturn_t 
ironlake_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, sde_ier; - - atomic_inc(&dev_priv->irq_received); - - /* disable master interrupt before clearing iir */ - de_ier = I915_READ(DEIER); - I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - POSTING_READ(DEIER); - - /* Disable south interrupts. We'll only write to SDEIIR once, so further - * interrupts will will be stored on its back queue, and then we'll be - * able to process them after we restore SDEIER (as soon as we restore - * it, we'll get an interrupt if SDEIIR still has something to process - * due to its back queue). */ - sde_ier = I915_READ(SDEIER); - I915_WRITE(SDEIER, 0); - POSTING_READ(SDEIER); - - gt_iir = I915_READ(GTIIR); - if (gt_iir) { - if (IS_GEN5(dev)) - ilk_gt_irq_handler(dev, dev_priv, gt_iir); - else - snb_gt_irq_handler(dev, dev_priv, gt_iir); - I915_WRITE(GTIIR, gt_iir); - ret = IRQ_HANDLED; - } - - de_iir = I915_READ(DEIIR); - if (de_iir) { - ilk_display_irq_handler(dev, de_iir); - I915_WRITE(DEIIR, de_iir); - ret = IRQ_HANDLED; - } - - if (IS_GEN6(dev)) { - u32 pm_iir = I915_READ(GEN6_PMIIR); - if (pm_iir) { - if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_rps_irq_handler(dev_priv, pm_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - ret = IRQ_HANDLED; - } - } - - I915_WRITE(DEIER, de_ier); - POSTING_READ(DEIER); - I915_WRITE(SDEIER, sde_ier); - POSTING_READ(SDEIER); - - return ret; -} - /** * i915_error_work_func - do process context error handling work * @work: work struct @@ -3118,7 +3067,7 @@ void intel_irq_init(struct drm_device *dev) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { /* Share uninstall handlers with ILK/SNB */ - dev->driver->irq_handler = ivybridge_irq_handler; + dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; -- cgit v1.2.3 From b518421f5f91365a08ebe55497b32fe6d90ef4df Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 20:00:08 -0300 Subject: drm/i915: kill Ivybridge vblank irq vfuncs The IVB funtions are exactly the same as the ILK ones, with the exception of the bit register. So add IVB/HSW support to ironlake_enable_vblank and ironlake_disable_vblank, then kill the ivybridge functions. Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 41 ++++++++--------------------------------- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 11 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7c201f7906eb..9d5ae21f39ba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1658,29 +1658,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK_ILK(pipe); if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - return 0; -} - -static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (5 * pipe)); + ironlake_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -1731,21 +1716,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK_ILK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (pipe * 5)); + ironlake_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -3071,8 +3046,8 @@ void intel_irq_init(struct drm_device *dev) dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; - dev->driver->enable_vblank = ivybridge_enable_vblank; - dev->driver->disable_vblank = ivybridge_disable_vblank; + dev->driver->enable_vblank = ironlake_enable_vblank; + dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1d710966983e..e20f0937b3ec 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3787,6 +3787,9 @@ #define DE_PLANEA_FLIP_DONE_IVB (1<<3) #define DE_PIPEA_VBLANK_IVB (1<<0) +#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7)) +#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) + #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ #define MASTER_INTERRUPT_ENABLE (1<<31) -- cgit v1.2.3 From 8e76f8dc49f180b0e9d750426c99e37a7d6162ae Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 12 Jul 2013 20:01:56 -0300 Subject: drm/i915: kill ivybridge_irq_postinstall It was very similar to ironlake_irq_postinstall, so IMHO merging both functions results in a code that is easier to maintain. With this change, all the irq handler vfuncs between ironlake and ivybridge are now unified. 
v2: Add "(" and ")" to make at least one vim user much happier (Chris) Signed-off-by: Paulo Zanoni Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 69 ++++++++++++----------------------------- 1 file changed, 20 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9d5ae21f39ba..1fffb4b10a05 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2165,21 +2165,33 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { unsigned long irqflags; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | - DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | - DE_PIPEA_FIFO_UNDERRUN | DE_POISON; + u32 display_mask, extra_mask; + + if (INTEL_INFO(dev)->gen >= 7) { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | + DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | + DE_PLANEB_FLIP_DONE_IVB | + DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | + DE_ERR_INT_IVB); + extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | + DE_PIPEA_VBLANK_IVB); + + I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); + } else { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | + DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | + DE_PIPEA_FIFO_UNDERRUN | DE_POISON); + extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; + } dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, display_mask | - DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); + I915_WRITE(DEIER, display_mask | extra_mask); POSTING_READ(DEIER); gen5_gt_irq_postinstall(dev); @@ -2200,38 +2212,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) return 0; } -static int ivybridge_irq_postinstall(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = - DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | - DE_PLANEC_FLIP_DONE_IVB | - DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | - DE_AUX_CHANNEL_A_IVB | - DE_ERR_INT_IVB; - - dev_priv->irq_mask = ~display_mask; - - /* should always can generate irq */ - I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); - I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, - display_mask | - DE_PIPEC_VBLANK_IVB | - DE_PIPEB_VBLANK_IVB | - DE_PIPEA_VBLANK_IVB); - POSTING_READ(DEIER); - - gen5_gt_irq_postinstall(dev); - - ibx_irq_postinstall(dev); - - return 0; -} - static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -3040,15 +3020,6 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - /* Share uninstall handlers with ILK/SNB */ - dev->driver->irq_handler = ironlake_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_preinstall; - dev->driver->irq_postinstall = ivybridge_irq_postinstall; - 
dev->driver->irq_uninstall = ironlake_irq_uninstall; - dev->driver->enable_vblank = ironlake_enable_vblank; - dev->driver->disable_vblank = ironlake_disable_vblank; - dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; -- cgit v1.2.3 From cce723ed091ac304d48386bcc3524994c345123e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 19 Jul 2013 09:16:42 -0700 Subject: drm/i915: Make i915 events part of uapi Make the uevent strings part of the user API for people who wish to write their own listeners. v2: Make a space in the string concatenation. (Chad) Use the "UEVENT" suffix intead of "EVENT" (Chad) Make kernel-doc parseable Docbook comments (Daniel) v3: Undid reset change introduced in last submission (Daniel) Fixed up comments to address removal changes. Thanks to Daniel Vetter for a majority of the parity error comments. CC: Chad Versace Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 8 ++++---- include/uapi/drm/i915_drm.h | 24 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1fffb4b10a05..58ee8269471f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -812,7 +812,7 @@ static void ivybridge_parity_work(struct work_struct *work) mutex_unlock(&dev_priv->dev->struct_mutex); - parity_event[0] = "L3_PARITY_ERROR=1"; + parity_event[0] = I915_L3_PARITY_UEVENT "=1"; parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); @@ -1402,9 +1402,9 @@ static void i915_error_work_func(struct work_struct *work) gpu_error); struct drm_device *dev = dev_priv->dev; struct intel_ring_buffer *ring; - char *error_event[] = { "ERROR=1", NULL }; - char *reset_event[] = { "RESET=1", NULL }; - char *reset_done_event[] = { "ERROR=0", NULL }; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int i, ret; kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 923ed7fe5775..a1a7b6bd60d8 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -33,6 +33,30 @@ * subject to backwards-compatibility constraints. */ +/** + * DOC: uevents generated by i915 on it's device node + * + * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch + * event from the gpu l3 cache. Additional information supplied is ROW, + * BANK, SUBBANK of the affected cacheline. Userspace should keep track of + * these events and if a specific cache-line seems to have a persistent + * error remap it with the l3 remapping tool supplied in intel-gpu-tools. + * The value supplied with the event is always 1. + * + * I915_ERROR_UEVENT - Generated upon error detection, currently only via + * hangcheck. The error detection event is a good indicator of when things + * began to go badly. The value supplied with the event is a 1 upon error + * detection, and a 0 upon reset completion, signifying no more error + * exists. NOTE: Disabling hangcheck or reset via module parameter will + * cause the related events to not be seen. 
+ * + * I915_RESET_UEVENT - Event is generated just before an attempt to reset the + * the GPU. The value supplied with the event is always 1. NOTE: Disable + * reset via module parameter will cause this event to not be seen. + */ +#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" +#define I915_ERROR_UEVENT "ERROR" +#define I915_RESET_UEVENT "RESET" /* Each region is a minimum of 16k, and there are at most 255 of them. */ -- cgit v1.2.3 From d8fc8a47105bc744000cec280269e1054921f8d6 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 19 Jul 2013 18:57:55 -0300 Subject: drm/i915: invert {ilk, snb}_gt_irq_handler check Requested by Chris Wilson on IRC. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 58ee8269471f..f708e4efa1be 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1340,10 +1340,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) gt_iir = I915_READ(GTIIR); if (gt_iir) { - if (IS_GEN5(dev)) - ilk_gt_irq_handler(dev, dev_priv, gt_iir); - else + if (INTEL_INFO(dev)->gen >= 6) snb_gt_irq_handler(dev, dev_priv, gt_iir); + else + ilk_gt_irq_handler(dev, dev_priv, gt_iir); I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; } -- cgit v1.2.3 From 492d774db34fd601b3db17218cf0ae262b51b5cf Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:35 +0200 Subject: drm: remove drm_modctx ioctl and use drm_noop instead It doesn't do anything, so kill the code. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_context.c | 6 ------ drivers/gpu/drm/drm_drv.c | 2 +- include/drm/drmP.h | 2 -- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 725968d38976..6f9731ff6a5b 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -342,12 +342,6 @@ int drm_addctx(struct drm_device *dev, void *data, return 0; } -int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - /* This does nothing */ - return 0; -} - /** * Get context. 
* diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 99fcd7c32ea2..d668011dafd4 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 12083dc862a9..523e5f2e3218 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1335,8 +1335,6 @@ extern int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_modctx(struct drm_device *dev, void *data, - struct drm_file *file_priv); extern int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_switchctx(struct drm_device *dev, void *data, -- cgit v1.2.3 From 3dadef6c96c8aa6e67f83b30504256a0605ee4d6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:36 +0200 Subject: drm: kill dev->context_wait No one ever waits on this waitqueue, so the wake_up call is wasted. Remove it all. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_context.c | 1 - drivers/gpu/drm/drm_fops.c | 1 - include/drm/drmP.h | 1 - 3 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 6f9731ff6a5b..5cc17f32eb79 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -261,7 +261,6 @@ static int drm_context_switch_complete(struct drm_device *dev, when the kernel holds the lock, release that lock here. */ clear_bit(0, &dev->context_flag); - wake_up(&dev->context_wait); return 0; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 3a24385e0368..0dcbbdb6f972 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -81,7 +81,6 @@ static int drm_setup(struct drm_device * dev) dev->last_context = 0; dev->last_switch = 0; dev->last_checked = 0; - init_waitqueue_head(&dev->context_wait); dev->if_version = 0; dev->ctx_start = 0; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 523e5f2e3218..449913313100 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1133,7 +1133,6 @@ struct drm_device { __volatile__ long context_flag; /**< Context swapping flag */ __volatile__ long interrupt_flag; /**< Interruption handler flag */ __volatile__ long dma_flag; /**< DMA dispatch flag */ - wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ unsigned long last_switch; /**< jiffies at last context switch */ -- cgit v1.2.3 From a17800c70129d5976a52c42f04a16a0f1d9df4b2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:37 +0200 Subject: drm: remove dev->last_switch Only ever assigned in the context code for real, with no readers anywhere. Remove it. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_context.c | 1 - drivers/gpu/drm/drm_fops.c | 1 - include/drm/drmP.h | 1 - 3 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 5cc17f32eb79..224ff965bcf7 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -251,7 +251,6 @@ static int drm_context_switch_complete(struct drm_device *dev, struct drm_file *file_priv, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ - dev->last_switch = jiffies; if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { DRM_ERROR("Lock isn't held after context switch\n"); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 0dcbbdb6f972..c14fdc1c109b 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -79,7 +79,6 @@ static int drm_setup(struct drm_device * dev) dev->interrupt_flag = 0; dev->dma_flag = 0; dev->last_context = 0; - dev->last_switch = 0; dev->last_checked = 0; dev->if_version = 0; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 449913313100..089e82028e7a 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1135,7 +1135,6 @@ struct drm_device { __volatile__ long dma_flag; /**< DMA dispatch flag */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ - unsigned long last_switch; /**< jiffies at last context switch */ /*@} */ struct work_struct work; -- cgit v1.2.3 From c78d7531031cb6d163e7450bda563c267beef777 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:38 +0200 Subject: drm: kill dev->interrupt_flag and dev->dma_flag Completely unused, so just remove them. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 2 -- include/drm/drmP.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c14fdc1c109b..386c304c5cb1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -76,8 +76,6 @@ static int drm_setup(struct drm_device * dev) dev->sigdata.lock = NULL; dev->context_flag = 0; - dev->interrupt_flag = 0; - dev->dma_flag = 0; dev->last_context = 0; dev->last_checked = 0; dev->if_version = 0; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 089e82028e7a..1ec806f76ad9 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1131,8 +1131,6 @@ struct drm_device { /*@{ */ int irq_enabled; /**< True if irq handler is enabled */ __volatile__ long context_flag; /**< Context swapping flag */ - __volatile__ long interrupt_flag; /**< Interruption handler flag */ - __volatile__ long dma_flag; /**< DMA dispatch flag */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ /*@} */ -- cgit v1.2.3 From c7e00b6d6a08772fac43b0fcea7fb48e6a1fe390 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:39 +0200 Subject: drm: kill dev->ctx_start and dev->lck_start Again completely unused, so just remove it. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 3 --- include/drm/drmP.h | 2 -- 2 files changed, 5 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 386c304c5cb1..a3714a0235cb 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -80,9 +80,6 @@ static int drm_setup(struct drm_device * dev) dev->last_checked = 0; dev->if_version = 0; - dev->ctx_start = 0; - dev->lck_start = 0; - dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1ec806f76ad9..040369571504 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1170,8 +1170,6 @@ struct drm_device { spinlock_t event_lock; /*@} */ - cycles_t ctx_start; - cycles_t lck_start; struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ wait_queue_head_t buf_readers; /**< Processes waiting to read */ -- cgit v1.2.3 From 5379dc042257cbf0fa2c54b4fa29d7077da35a25 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:40 +0200 Subject: drm/radoen: kill radeon_dma_ioctl_kms No need to create a dummy ioctl function to return -EINVAL, since that's what the core already does in the absence of the dma_ioctl callback. So we can safely remove this. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 3 --- drivers/gpu/drm/radeon/radeon_kms.c | 10 ---------- 2 files changed, 13 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 29876b1be8ec..c8eb2aa2879f 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -101,8 +101,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev); irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); -int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, - struct drm_file *file_priv); int radeon_gem_object_init(struct drm_gem_object *obj); void radeon_gem_object_free(struct drm_gem_object *obj); int radeon_gem_object_open(struct drm_gem_object *obj, @@ -421,7 +419,6 @@ static struct drm_driver kms_driver = { .gem_free_object = radeon_gem_object_free, .gem_open_object = radeon_gem_object_open, .gem_close_object = radeon_gem_object_close, - .dma_ioctl = radeon_dma_ioctl_kms, .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, .dumb_destroy = radeon_mode_dumb_destroy, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 49ff3d1a6102..07b023655bb4 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -683,16 +683,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, drmcrtc); } -/* - * IOCTL. - */ -int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - /* Not valid in KMS. */ - return -EINVAL; -} - #define KMS_INVALID_IOCTL(name) \ int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ { \ -- cgit v1.2.3 From 494f38e4e0c5ffca110e361cd3391f25313b52c7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:41 +0200 Subject: drm: kill dev->buf_readers and dev->buf_writers Again totally unused, so just remove them. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 2 -- include/drm/drmP.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index a3714a0235cb..57e30145f68e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -81,8 +81,6 @@ static int drm_setup(struct drm_device * dev) dev->if_version = 0; dev->buf_async = NULL; - init_waitqueue_head(&dev->buf_readers); - init_waitqueue_head(&dev->buf_writers); DRM_DEBUG("\n"); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 040369571504..6cde92465130 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1172,8 +1172,6 @@ struct drm_device { /*@} */ struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ - wait_queue_head_t buf_readers; /**< Processes waiting to read */ - wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ struct drm_agp_head *agp; /**< AGP data */ -- cgit v1.2.3 From 4cb4ea39cd61a9d71bf7227c5927c870fdde710c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:54 +0200 Subject: drm/nouveau: drop DRIVER_PCI_DMA and DRIVER_SG The former doesn't do anything without DRIVER_HAVE_DMA (which is force-disabled for kms drivers anyway). The latter isn't used by the (kms) nouveau ddx. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 218a4b522fe5..e990327d117a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -675,7 +675,7 @@ nouveau_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_USE_AGP | DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, .load = nouveau_drm_load, -- cgit v1.2.3 From 81e9569760127e473020d1c50c95086614f8d203 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:49 +0200 Subject: drm/radeon: remove DRIVER_HAS_DMA/SG/PCI_DMA from the kms driver Really, this is all old-style stuff and just copy-pasta from the ums driver. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c8eb2aa2879f..cb7f1a8c5a4a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -388,8 +388,8 @@ static const struct file_operations radeon_driver_kms_fops = { static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | - DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM | + DRIVER_USE_AGP | DRIVER_USE_MTRR | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, .dev_priv_size = 0, .load = radeon_driver_load_kms, -- cgit v1.2.3 From 1d8d29cf2a9956ec5ea2395232a8121577a6cfee Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:50 +0200 Subject: drm: fold in drm_sg_alloc into the ioctl There's no other caller from driver code, so we can fold this in. 
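For reference, the fold is invisible to userspace: DRM_IOCTL_SG_ALLOC and struct drm_scatter_gather are unchanged, only the in-kernel indirection goes away. A minimal userspace sketch of the ioctl this path services (illustrative only; it assumes kernel uapi headers are installed, a legacy non-KMS node at the example path /dev/dri/card0, and an authenticated master/root caller):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	/* hypothetical device path; SG_ALLOC also needs DRM master/root rights */
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_scatter_gather sg = { .size = 1 << 20 }; /* ask for 1 MiB */

	if (fd < 0)
		return 1;

	/* after this patch the ioctl table entry calls drm_sg_alloc() directly */
	if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg) == 0)
		printf("sg handle: 0x%lx\n", sg.handle);

	return 0;
}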
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 2 +- drivers/gpu/drm/drm_scatter.c | 13 +++---------- include/drm/drmP.h | 3 +-- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index d668011dafd4..5993bfc8dcb9 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -122,7 +122,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif - DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index d87f60bbc330..a4a076ff1757 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -70,8 +70,10 @@ void drm_sg_cleanup(struct drm_sg_mem * entry) # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) +int drm_sg_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { + struct drm_scatter_gather *request = data; struct drm_sg_mem *entry; unsigned long pages, i, j; @@ -181,15 +183,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) return -ENOMEM; } -int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - - return drm_sg_alloc(dev, request); - -} - int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 6cde92465130..1ea404fd745d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1567,9 +1567,8 @@ extern int drm_vma_info(struct seq_file *m, void *data); /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); -extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, +extern int drm_sg_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From 23367ff49065505e4a255dba2117f654ca26063f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 17:51:10 +0200 Subject: drm: rip out dev->last_checked Only ever re-cleared in drm_setup, otherwise completely unused. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 1 - include/drm/drmP.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 57e30145f68e..72acae908a7d 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -77,7 +77,6 @@ static int drm_setup(struct drm_device * dev) dev->context_flag = 0; dev->last_context = 0; - dev->last_checked = 0; dev->if_version = 0; dev->buf_async = NULL; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1ea404fd745d..dd03fd4239ad 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1131,7 +1131,6 @@ struct drm_device { /*@{ */ int irq_enabled; /**< True if irq handler is enabled */ __volatile__ long context_flag; /**< Context swapping flag */ - int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ /*@} */ -- cgit v1.2.3 From 89c8233f82d9c8af5b20e72e4a185a38a7d3c50b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Thu, 11 Jul 2013 11:56:32 +0200 Subject: drm/gem: simplify object initialization drm_gem_object_init() and drm_gem_private_object_init() do exactly the same (except for shmem alloc) so make the first use the latter to reduce code duplication. Also drop the return code from drm_gem_private_object_init(). It seems unlikely that we will extend it any time soon so no reason to keep it around. This simplifies code paths in drivers, too. Last but not least, fix gma500 to call drm_gem_object_release() before freeing objects that were allocated via drm_gem_private_object_init(). That isn't actually necessary for now, but might be in the future. Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Reviewed-by: Patrik Jakobsson Acked-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 20 ++++++++------------ drivers/gpu/drm/gma500/framebuffer.c | 6 ++---- drivers/gpu/drm/gma500/gem.c | 7 ++++--- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 7 +------ drivers/gpu/drm/i915/i915_gem_stolen.c | 4 +--- drivers/gpu/drm/omapdrm/omap_gem.c | 3 ++- include/drm/drmP.h | 4 ++-- 7 files changed, 20 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 603f256152ef..1ad9e7ec0119 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -132,16 +132,14 @@ drm_gem_destroy(struct drm_device *dev) int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size) { - BUG_ON((size & (PAGE_SIZE - 1)) != 0); + struct file *filp; - obj->dev = dev; - obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); - if (IS_ERR(obj->filp)) - return PTR_ERR(obj->filp); + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (IS_ERR(filp)) + return PTR_ERR(filp); - kref_init(&obj->refcount); - atomic_set(&obj->handle_count, 0); - obj->size = size; + drm_gem_private_object_init(dev, obj, size); + obj->filp = filp; return 0; } @@ -152,8 +150,8 @@ EXPORT_SYMBOL(drm_gem_object_init); * no GEM provided backing store. Instead the caller is responsible for * backing the object and handling it. 
*/ -int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size) +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size) { BUG_ON((size & (PAGE_SIZE - 1)) != 0); @@ -163,8 +161,6 @@ int drm_gem_private_object_init(struct drm_device *dev, kref_init(&obj->refcount); atomic_set(&obj->handle_count, 0); obj->size = size; - - return 0; } EXPORT_SYMBOL(drm_gem_private_object_init); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 8b1b6d923abe..362dd2ad286f 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size) /* Begin by trying to use stolen memory backing */ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1); if (backing) { - if (drm_gem_private_object_init(dev, - &backing->gem, aligned_size) == 0) - return backing; - psb_gtt_free_range(dev, backing); + drm_gem_private_object_init(dev, &backing->gem, aligned_size); + return backing; } return NULL; } diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index eefd6cc5b80d..fe1d3320ce6a 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -261,11 +261,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev, struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1); if (gtt == NULL) return -ENOMEM; - if (drm_gem_private_object_init(dev, >t->gem, size) != 0) - goto free_gtt; + + drm_gem_private_object_init(dev, >t->gem, size); if (drm_gem_handle_create(file, >t->gem, handle) == 0) return 0; -free_gtt: + + drm_gem_object_release(>t->gem); psb_gtt_free_range(dev, gtt); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..f2e185c9038f 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, goto fail_detach; } - ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - if (ret) { - i915_gem_object_free(obj); - goto fail_detach; - } - + drm_gem_private_object_init(dev, &obj->base, dma_buf->size); i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); obj->base.import_attach = attach; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 5c1a535d5072..55218332e625 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -291,9 +291,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj == NULL) return NULL; - if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) - goto cleanup; - + drm_gem_private_object_init(dev, &obj->base, stolen->size); i915_gem_object_init(obj, &i915_gem_object_stolen_ops); obj->pages = i915_pages_create_for_stolen(dev, diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index ebbdf4132e9c..cbcd71e6ed83 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1427,8 +1427,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj->height = gsize.tiled.height; } + ret = 0; if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) - ret = drm_gem_private_object_init(dev, obj, size); + drm_gem_private_object_init(dev, obj, size); else ret = drm_gem_object_init(dev, obj, size); diff --git 
a/include/drm/drmP.h b/include/drm/drmP.h index dd03fd4239ad..86f524338e03 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1601,8 +1601,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, size_t size); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); -int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size); +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); void drm_gem_object_handle_free(struct drm_gem_object *obj); void drm_gem_vm_open(struct vm_area_struct *vma); void drm_gem_vm_close(struct vm_area_struct *vma); -- cgit v1.2.3 From c3911624f9ecff440340ad6d94dc5835f55b0db6 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Thu, 11 Jul 2013 11:56:33 +0200 Subject: drm/pci: remove useles #if 1 These don't make any sense, really.. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_pci.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 80c0b2b29801..a7b46ff80b0f 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -52,10 +52,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) { drm_dma_handle_t *dmah; -#if 1 unsigned long addr; size_t sz; -#endif /* pci_alloc_consistent only guarantees alignment to the smallest * PAGE_SIZE order which is greater than or equal to the requested size. @@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc); */ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { -#if 1 unsigned long addr; size_t sz; -#endif if (dmah->vaddr) { /* XXX - Is virt_to_page() legal for consistent mem? */ -- cgit v1.2.3 From da5cbe361c55fe17ef94d2587991997f81f8c4cb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 16 Jul 2013 09:11:56 +0200 Subject: drm/gem: remove drm_gem_object_handle_unreference It's unused, everyone is using the _unlocked variant only. Signed-off-by: Daniel Vetter Reviewed-by: Rob Clark Signed-off-by: Dave Airlie --- include/drm/drmP.h | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 86f524338e03..5664acea3000 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1648,24 +1648,6 @@ drm_gem_object_handle_reference(struct drm_gem_object *obj) atomic_inc(&obj->handle_count); } -static inline void -drm_gem_object_handle_unreference(struct drm_gem_object *obj) -{ - if (obj == NULL) - return; - - if (atomic_read(&obj->handle_count) == 0) - return; - /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ - if (atomic_dec_and_test(&obj->handle_count)) - drm_gem_object_handle_free(obj); - drm_gem_object_unreference(obj); -} - static inline void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { -- cgit v1.2.3 From bd0c0ceef6b1e7cb8c5dc1c9c6d168bae6bccaa0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:56 +0200 Subject: drm: move drm_getsarea into drm_bufs.c It fiddles the sarea out of the maps which are also handled in drm_bufs.c With this drm_drv.c is a notch more legacy free. 
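The exported signature stays the same, so the handful of legacy UMS call sites keep working across the move. A rough kernel-side sketch of the usual caller pattern; the driver-private struct and its sarea member are made-up names used only for illustration:

/* illustrative only: example_dev_priv and its sarea member are hypothetical */
static int example_init_sarea(struct drm_device *dev,
			      struct example_dev_priv *dev_priv)
{
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		return -EINVAL;
	}

	return 0;
}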
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 14 ++++++++++++++ drivers/gpu/drm/drm_drv.c | 15 --------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 5a4dbb410b71..9d09f4c48a4e 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1600,6 +1600,20 @@ int drm_mapbufs(struct drm_device *dev, void *data, return retcode; } +struct drm_local_map *drm_getsarea(struct drm_device *dev) +{ + struct drm_map_list *entry; + + list_for_each_entry(entry, &dev->maplist, head) { + if (entry->map && entry->map->type == _DRM_SHM && + (entry->map->flags & _DRM_CONTAINS_LOCK)) { + return entry->map; + } + } + return NULL; +} +EXPORT_SYMBOL(drm_getsarea); + /** * Compute size order. Returns the exponent of the smaller power of two which * is greater or equal to given number. diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 5993bfc8dcb9..36103d1660d1 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -485,19 +485,4 @@ long drm_ioctl(struct file *filp, DRM_DEBUG("ret = %d\n", retcode); return retcode; } - EXPORT_SYMBOL(drm_ioctl); - -struct drm_local_map *drm_getsarea(struct drm_device *dev) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - if (entry->map && entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK)) { - return entry->map; - } - } - return NULL; -} -EXPORT_SYMBOL(drm_getsarea); -- cgit v1.2.3 From 04420c9c6a6d027511218b37ad986b9093667a91 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:57 +0200 Subject: drm/bufs: s/drm_order/order_base_2/ The version offered by the core is ridiculously optimized and does the same thing. So use it. 
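The equivalence can be spot-checked outside the kernel: for any size of at least 1, both helpers return the exponent of the smallest power of two that is greater than or equal to that size. The loop below is a userspace copy of the drm_order() implementation that a later patch in this series removes, and the expected output matches what order_base_2(), i.e. ilog2() of the size rounded up to a power of two, yields for the same inputs:

#include <stdio.h>

/* userspace copy of the drm_order() loop deleted later in this series */
static int old_drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}

int main(void)
{
	/* expected: 3, 4, 12, 15 (the same values order_base_2() gives) */
	unsigned long sizes[] = { 8, 9, 4096, 16385 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("%lu -> %d\n", sizes[i], old_drm_order(sizes[i]));

	return 0;
}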
Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 9d09f4c48a4e..c5bbcaaa73c0 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, } map->handle = vmalloc_user(map->size); DRM_DEBUG("%lu %d %p\n", - map->size, drm_order(map->size), map->handle); + map->size, order_base_2(map->size), map->handle); if (!map->handle) { kfree(map); return -ENOMEM; @@ -630,7 +630,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) return -EINVAL; count = request->count; - order = drm_order(request->size); + order = order_base_2(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) @@ -800,7 +800,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) return -EPERM; count = request->count; - order = drm_order(request->size); + order = order_base_2(request->size); size = 1 << order; DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", @@ -1002,7 +1002,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request return -EPERM; count = request->count; - order = drm_order(request->size); + order = order_base_2(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) @@ -1157,7 +1157,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request return -EPERM; count = request->count; - order = drm_order(request->size); + order = order_base_2(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) @@ -1435,7 +1435,7 @@ int drm_markbufs(struct drm_device *dev, void *data, DRM_DEBUG("%d, %d, %d\n", request->size, request->low_mark, request->high_mark); - order = drm_order(request->size); + order = order_base_2(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; entry = &dma->bufs[order]; -- cgit v1.2.3 From 0e267944f6ffc908eec4751d887f3a8936f412fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:58 +0200 Subject: drm/r128: s/drm_order/order_base_2/ Again just use the version provided by the linux core. Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/r128/r128_cce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index d4660cf942a5..c451257f08fb 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; - dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); + dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; -- cgit v1.2.3 From b72a8925fd5cc80107e3988536290d087b1079aa Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:11:59 +0200 Subject: drm/radeon: s/drm_order/order_base_2/ Last driver and pretty obviously a major user of this little function. 
Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/cik.c | 14 +++++++------- drivers/gpu/drm/radeon/evergreen.c | 4 ++-- drivers/gpu/drm/radeon/ni.c | 6 +++--- drivers/gpu/drm/radeon/r100.c | 2 +- drivers/gpu/drm/radeon/r600.c | 14 +++++++------- drivers/gpu/drm/radeon/r600_cp.c | 6 +++--- drivers/gpu/drm/radeon/radeon_cp.c | 6 +++--- drivers/gpu/drm/radeon/si.c | 14 +++++++------- 8 files changed, 33 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..6adbc998349e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ tmp = RREG32(CP_HPD_EOP_CONTROL); tmp &= ~EOP_SIZE_MASK; - tmp |= drm_order(MEC_HPD_SIZE / 8); + tmp |= order_base_2(MEC_HPD_SIZE / 8); WREG32(CP_HPD_EOP_CONTROL, tmp); } cik_srbm_select(rdev, 0, 0, 0, 0); @@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK); mqd->queue_state.cp_hqd_pq_control |= - drm_order(rdev->ring[idx].ring_size / 8); + order_base_2(rdev->ring[idx].ring_size / 8); mqd->queue_state.cp_hqd_pq_control |= - (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8); + (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8); #ifdef __BIG_ENDIAN mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; #endif @@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev) WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); + rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = rb_bufsz << 1; #ifdef __BIG_ENDIAN rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; @@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev) WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); - rb_bufsz = drm_order(rdev->ih.ring_size / 4); + rb_bufsz = order_base_2(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..b67c9ec7f690 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev) RREG32(GRBM_SOFT_RESET); /* Set ring buffer size */ - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..5b6e47765656 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) /* Set ring buffer size */ ring = 
&rdev->ring[ridx[i]]; - rb_cntl = drm_order(ring->ring_size / 8); - rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; + rb_cntl = order_base_2(ring->ring_size / 8); + rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8; #ifdef __BIG_ENDIAN rb_cntl |= BUF_SWAP_32BIT; #endif @@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev) WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); + rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = rb_bufsz << 1; #ifdef __BIG_ENDIAN rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 75349cdaa84b..5625cf706f0c 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) } /* Align ring size */ - rb_bufsz = drm_order(ring_size / 8); + rb_bufsz = order_base_2(ring_size / 8); ring_size = (1 << (rb_bufsz + 1)) * 4; r100_cp_load_microcode(rdev); r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 393880a09412..319e1ee1844a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(GRBM_SOFT_RESET, 0); /* Set ring buffer size */ - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign int r; /* Align ring size */ - rb_bufsz = drm_order(ring_size / 8); + rb_bufsz = order_base_2(ring_size / 8); ring_size = (1 << (rb_bufsz + 1)) * 4; ring->ring_size = ring_size; ring->align_mask = 16 - 1; @@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev) WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); + rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = rb_bufsz << 1; #ifdef __BIG_ENDIAN rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; @@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); /* Set ring buffer size */ - rb_bufsz = drm_order(ring->ring_size); + rb_bufsz = order_base_2(ring->ring_size); rb_bufsz = (0x1 << 8) | rb_bufsz; WREG32(UVD_RBC_RB_CNTL, rb_bufsz); @@ -3812,7 +3812,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) u32 rb_bufsz; /* Align ring size */ - rb_bufsz = drm_order(ring_size / 4); + rb_bufsz = order_base_2(ring_size / 4); ring_size = (1 << rb_bufsz) * 4; rdev->ih.ring_size = ring_size; rdev->ih.ptr_mask = rdev->ih.ring_size - 1; @@ -4049,7 +4049,7 @@ int r600_irq_init(struct radeon_device *rdev) WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); - rb_bufsz = drm_order(rdev->ih.ring_size / 4); + rb_bufsz = order_base_2(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 1c51c08b1fde..d8eb48bff0ed 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -2200,13 +2200,13 @@ int 
r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; - dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); + dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; - dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8); + dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8); dev_priv->ring.fetch_size = /* init->fetch_size */ 32; - dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16); + dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index efc4f6441ef4..3cae2bbc1854 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; - dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); + dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; - dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); + dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8); dev_priv->ring.fetch_size = /* init->fetch_size */ 32; - dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); + dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index d325280e2f9f..d71037f4f68f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev) /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev) /* ring1 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev) /* ring2 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev) 
WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); - rb_bufsz = drm_order(rdev->ih.ring_size / 4); + rb_bufsz = order_base_2(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | -- cgit v1.2.3 From 85d9cb41db3bf0f36c999c2e547b37cb9f32367b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 10 Jul 2013 14:12:00 +0200 Subject: drm: remove drm_order All users of it are now gone! Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 23 ----------------------- include/drm/drmP.h | 1 - 2 files changed, 24 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index c5bbcaaa73c0..bef4abff8fa3 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1613,26 +1613,3 @@ struct drm_local_map *drm_getsarea(struct drm_device *dev) return NULL; } EXPORT_SYMBOL(drm_getsarea); - -/** - * Compute size order. Returns the exponent of the smaller power of two which - * is greater or equal to given number. - * - * \param size size. - * \return order. - * - * \todo Can be made faster. - */ -int drm_order(unsigned long size) -{ - int order; - unsigned long tmp; - - for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; - - if (size & (size - 1)) - ++order; - - return order; -} -EXPORT_SYMBOL(drm_order); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5664acea3000..0ab6a090a15c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1394,7 +1394,6 @@ extern int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_order(unsigned long size); /* DMA support (drm_dma.h) */ extern int drm_dma_setup(struct drm_device *dev); -- cgit v1.2.3 From 5ea75e0f05d03007369f155c6c67541bc4ec309f Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Sun, 30 Jun 2013 21:39:00 +0200 Subject: drm/gma500: Add generic code for clock calculation This patch aims to unify the bits and pieces that are common (or similar enough) for pll clock calculations. Nothing makes use of this code yet That will come in later patches. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/Makefile | 1 + drivers/gpu/drm/gma500/gma_display.c | 143 +++++++++++++++++++++++++++++ drivers/gpu/drm/gma500/gma_display.h | 74 +++++++++++++++ drivers/gpu/drm/gma500/psb_drv.h | 2 + drivers/gpu/drm/gma500/psb_intel_display.c | 3 + drivers/gpu/drm/gma500/psb_intel_drv.h | 3 + 6 files changed, 226 insertions(+) create mode 100644 drivers/gpu/drm/gma500/gma_display.c create mode 100644 drivers/gpu/drm/gma500/gma_display.h diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index 7a2d40a5c1e1..e9064dd9045d 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -15,6 +15,7 @@ gma500_gfx-y += \ mmu.o \ power.o \ psb_drv.o \ + gma_display.o \ psb_intel_display.o \ psb_intel_lvds.o \ psb_intel_modes.o \ diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c new file mode 100644 index 000000000000..8f66d5c6505b --- /dev/null +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -0,0 +1,143 @@ +/* + * Copyright © 2006-2011 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Authors: + * Eric Anholt + * Patrik Jakobsson + */ + +#include +#include "gma_display.h" +#include "psb_intel_drv.h" +#include "psb_intel_reg.h" +#include "psb_drv.h" + +/** + * Returns whether any output on the specified pipe is of the specified type + */ +bool gma_pipe_has_type(struct drm_crtc *crtc, int type) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *l_entry; + + list_for_each_entry(l_entry, &mode_config->connector_list, head) { + if (l_entry->encoder && l_entry->encoder->crtc == crtc) { + struct psb_intel_encoder *psb_intel_encoder = + psb_intel_attached_encoder(l_entry); + if (psb_intel_encoder->type == type) + return true; + } + } + + return false; +} + +#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; } + +bool gma_pll_is_valid(struct drm_crtc *crtc, + const struct gma_limit_t *limit, + struct gma_clock_t *clock) +{ + if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) + GMA_PLL_INVALID("p1 out of range"); + if (clock->p < limit->p.min || limit->p.max < clock->p) + GMA_PLL_INVALID("p out of range"); + if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) + GMA_PLL_INVALID("m2 out of range"); + if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) + GMA_PLL_INVALID("m1 out of range"); + /* On CDV m1 is always 0 */ + if (clock->m1 <= clock->m2 && clock->m1 != 0) + GMA_PLL_INVALID("m1 <= m2 && m1 != 0"); + if (clock->m < limit->m.min || limit->m.max < clock->m) + GMA_PLL_INVALID("m out of range"); + if (clock->n < limit->n.min || limit->n.max < clock->n) + GMA_PLL_INVALID("n out of range"); + if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) + GMA_PLL_INVALID("vco out of range"); + /* XXX: We may need to be checking "Dot clock" + * depending on the multiplier, connector, etc., + * rather than just a single range. + */ + if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) + GMA_PLL_INVALID("dot out of range"); + + return true; +} + +bool gma_find_best_pll(const struct gma_limit_t *limit, + struct drm_crtc *crtc, int target, int refclk, + struct gma_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + const struct gma_clock_funcs *clock_funcs = + to_psb_intel_crtc(crtc)->clock_funcs; + struct gma_clock_t clock; + int err = target; + + if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { + /* + * For LVDS, if the panel is on, just rely on its current + * settings for dual-channel. We haven't figured out how to + * reliably set up different single/dual channel state, if we + * even can. 
+ */ + if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + + /* m1 is always 0 on CDV so the outmost loop will run just once */ + for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { + for (clock.m2 = limit->m2.min; + (clock.m2 < clock.m1 || clock.m1 == 0) && + clock.m2 <= limit->m2.max; clock.m2++) { + for (clock.n = limit->n.min; + clock.n <= limit->n.max; clock.n++) { + for (clock.p1 = limit->p1.min; + clock.p1 <= limit->p1.max; + clock.p1++) { + int this_err; + + clock_funcs->clock(refclk, &clock); + + if (!clock_funcs->pll_is_valid(crtc, + limit, &clock)) + continue; + + this_err = abs(clock.dot - target); + if (this_err < err) { + *best_clock = clock; + err = this_err; + } + } + } + } + } + + return err != target; +} diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h new file mode 100644 index 000000000000..a5d8aa31b5b7 --- /dev/null +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -0,0 +1,74 @@ +/* + * Copyright © 2006-2011 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Authors: + * Eric Anholt + * Patrik Jakobsson + */ + +#ifndef _GMA_DISPLAY_H_ +#define _GMA_DISPLAY_H_ + +struct gma_clock_t { + /* given values */ + int n; + int m1, m2; + int p1, p2; + /* derived values */ + int dot; + int vco; + int m; + int p; +}; + +struct gma_range_t { + int min, max; +}; + +struct gma_p2_t { + int dot_limit; + int p2_slow, p2_fast; +}; + +struct gma_limit_t { + struct gma_range_t dot, vco, n, m, m1, m2, p, p1; + struct gma_p2_t p2; + bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *, + int target, int refclk, + struct gma_clock_t *best_clock); +}; + +struct gma_clock_funcs { + void (*clock)(int refclk, struct gma_clock_t *clock); + const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk); + bool (*pll_is_valid)(struct drm_crtc *crtc, + const struct gma_limit_t *limit, + struct gma_clock_t *clock); +}; + +/* Common pipe related functions */ +extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type); + +/* Common clock related functions */ +extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk); +extern void gma_clock(int refclk, struct gma_clock_t *clock); +extern bool gma_pll_is_valid(struct drm_crtc *crtc, + const struct gma_limit_t *limit, + struct gma_clock_t *clock); +extern bool gma_find_best_pll(const struct gma_limit_t *limit, + struct drm_crtc *crtc, int target, int refclk, + struct gma_clock_t *best_clock); +#endif diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 6053b8abcd12..eeed88c3c37e 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -27,6 +27,7 @@ #include #include "psb_reg.h" #include "psb_intel_drv.h" +#include "gma_display.h" #include "intel_bios.h" #include "gtt.h" #include "power.h" @@ -675,6 +676,7 @@ struct psb_ops { /* Sub functions */ struct drm_crtc_helper_funcs const *crtc_helper; struct drm_crtc_funcs const *crtc_funcs; + const struct gma_clock_funcs *clock_funcs; /* Setup hooks */ int (*chip_setup)(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 6666493789d1..0f1d069afa11 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -1251,6 +1251,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, /* Set the CRTC operations from the chip specific data */ drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs); + /* Set the CRTC clock functions from chip specific data */ + psb_intel_crtc->clock_funcs = dev_priv->ops->clock_funcs; + drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); psb_intel_crtc->pipe = pipe; psb_intel_crtc->plane = pipe; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 4dcae421a58d..bfe0408c1291 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -24,6 +24,7 @@ #include #include #include +#include "gma_display.h" /* * Display related stuff @@ -188,6 +189,8 @@ struct psb_intel_crtc { /* Saved Crtc HW states */ struct psb_intel_crtc_state *crtc_state; + + const struct gma_clock_funcs *clock_funcs; }; #define to_psb_intel_crtc(x) \ -- cgit v1.2.3 From 2adb29ff61c97982addf702d7da106569e217329 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Mon, 1 Jul 2013 01:42:16 +0200 Subject: drm/gma500/cdv: Make use of the generic clock code Add chip specific callbacks for the generic and non-generic clock calculation code. 
Also remove as much dupilicated code as possible. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_device.c | 1 + drivers/gpu/drm/gma500/cdv_device.h | 1 + drivers/gpu/drm/gma500/cdv_intel_display.c | 197 +++++------------------------ 3 files changed, 36 insertions(+), 163 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 23e14e93991f..daa45b5b3a45 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = { .crtc_helper = &cdv_intel_helper_funcs, .crtc_funcs = &cdv_intel_crtc_funcs, + .clock_funcs = &cdv_clock_funcs, .output_init = cdv_output_init, .hotplug = cdv_hotplug_event, diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 9561e17621b3..0fcb608bbdf9 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h @@ -17,6 +17,7 @@ extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs; extern const struct drm_crtc_funcs cdv_intel_crtc_funcs; +extern const struct gma_clock_funcs cdv_clock_funcs; extern void cdv_intel_crt_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); extern void cdv_intel_lvds_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 82430ad8ba62..82f1ae46e0ac 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -30,43 +30,10 @@ #include "power.h" #include "cdv_device.h" +static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, + struct drm_crtc *crtc, int target, + int refclk, struct gma_clock_t *best_clock); -struct cdv_intel_range_t { - int min, max; -}; - -struct cdv_intel_p2_t { - int dot_limit; - int p2_slow, p2_fast; -}; - -struct cdv_intel_clock_t { - /* given values */ - int n; - int m1, m2; - int p1, p2; - /* derived values */ - int dot; - int vco; - int m; - int p; -}; - -#define INTEL_P2_NUM 2 - -struct cdv_intel_limit_t { - struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1; - struct cdv_intel_p2_t p2; - bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *, - int, int, struct cdv_intel_clock_t *); -}; - -static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, - struct drm_crtc *crtc, int target, int refclk, - struct cdv_intel_clock_t *best_clock); -static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, - int refclk, - struct cdv_intel_clock_t *best_clock); #define CDV_LIMIT_SINGLE_LVDS_96 0 #define CDV_LIMIT_SINGLE_LVDS_100 1 @@ -75,7 +42,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct #define CDV_LIMIT_DP_27 4 #define CDV_LIMIT_DP_100 5 -static const struct cdv_intel_limit_t cdv_intel_limits[] = { +static const struct gma_limit_t cdv_intel_limits[] = { { /* CDV_SINGLE_LVDS_96MHz */ .dot = {.min = 20000, .max = 115500}, .vco = {.min = 1800000, .max = 3600000}, @@ -85,9 +52,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .m2 = {.min = 58, .max = 158}, .p = {.min = 28, .max = 140}, .p1 = {.min = 2, .max = 10}, - .p2 = {.dot_limit = 200000, - .p2_slow = 14, .p2_fast = 14}, - .find_pll = cdv_intel_find_best_PLL, + .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, + .find_pll = gma_find_best_pll, }, { /* CDV_SINGLE_LVDS_100MHz */ .dot = {.min = 20000, .max = 115500}, @@ -102,7 +68,7 @@ static const struct 
cdv_intel_limit_t cdv_intel_limits[] = { * is 80-224Mhz. Prefer single channel as much as possible. */ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, - .find_pll = cdv_intel_find_best_PLL, + .find_pll = gma_find_best_pll, }, { /* CDV_DAC_HDMI_27MHz */ .dot = {.min = 20000, .max = 400000}, @@ -114,7 +80,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 90}, .p1 = {.min = 1, .max = 9}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, - .find_pll = cdv_intel_find_best_PLL, + .find_pll = gma_find_best_pll, }, { /* CDV_DAC_HDMI_96MHz */ .dot = {.min = 20000, .max = 400000}, @@ -126,7 +92,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 100}, .p1 = {.min = 1, .max = 10}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, - .find_pll = cdv_intel_find_best_PLL, + .find_pll = gma_find_best_pll, }, { /* CDV_DP_27MHz */ .dot = {.min = 160000, .max = 272000}, @@ -255,7 +221,7 @@ void cdv_sb_reset(struct drm_device *dev) */ static int cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, - struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) + struct gma_clock_t *clock, bool is_lvds, u32 ddi_select) { struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); int pipe = psb_crtc->pipe; @@ -405,31 +371,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, return 0; } -/* - * Returns whether any encoder on the specified pipe is of the specified type - */ -static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type) +static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc, + int refclk) { - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *l_entry; - - list_for_each_entry(l_entry, &mode_config->connector_list, head) { - if (l_entry->encoder && l_entry->encoder->crtc == crtc) { - struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(l_entry); - if (psb_intel_encoder->type == type) - return true; - } - } - return false; -} - -static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc, - int refclk) -{ - const struct cdv_intel_limit_t *limit; - if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + const struct gma_limit_t *limit; + if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { /* * Now only single-channel LVDS is supported on CDV. If it is * incorrect, please add the dual-channel LVDS. 
@@ -454,8 +400,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc, } /* m1 is reserved as 0 in CDV, n is a ring counter */ -static void cdv_intel_clock(struct drm_device *dev, - int refclk, struct cdv_intel_clock_t *clock) +static void cdv_intel_clock(int refclk, struct gma_clock_t *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -463,93 +408,12 @@ static void cdv_intel_clock(struct drm_device *dev, clock->dot = clock->vco / clock->p; } - -#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } -static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, - const struct cdv_intel_limit_t *limit, - struct cdv_intel_clock_t *clock) +static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, + struct drm_crtc *crtc, int target, + int refclk, + struct gma_clock_t *best_clock) { - if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) - INTELPllInvalid("p1 out of range\n"); - if (clock->p < limit->p.min || limit->p.max < clock->p) - INTELPllInvalid("p out of range\n"); - /* unnecessary to check the range of m(m1/M2)/n again */ - if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) - INTELPllInvalid("vco out of range\n"); - /* XXX: We may need to be checking "Dot clock" - * depending on the multiplier, connector, etc., - * rather than just a single range. - */ - if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) - INTELPllInvalid("dot out of range\n"); - - return true; -} - -static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, - struct drm_crtc *crtc, int target, int refclk, - struct cdv_intel_clock_t *best_clock) -{ - struct drm_device *dev = crtc->dev; - struct cdv_intel_clock_t clock; - int err = target; - - - if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && - (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { - /* - * For LVDS, if the panel is on, just rely on its current - * settings for dual-channel. We haven't figured out how to - * reliably set up different single/dual channel state, if we - * even can. - */ - if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) - clock.p2 = limit->p2.p2_fast; - else - clock.p2 = limit->p2.p2_slow; - } else { - if (target < limit->p2.dot_limit) - clock.p2 = limit->p2.p2_slow; - else - clock.p2 = limit->p2.p2_fast; - } - - memset(best_clock, 0, sizeof(*best_clock)); - clock.m1 = 0; - /* m1 is reserved as 0 in CDV, n is a ring counter. 
- So skip the m1 loop */ - for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { - for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; - clock.m2++) { - for (clock.p1 = limit->p1.min; - clock.p1 <= limit->p1.max; - clock.p1++) { - int this_err; - - cdv_intel_clock(dev, refclk, &clock); - - if (!cdv_intel_PLL_is_valid(crtc, - limit, &clock)) - continue; - - this_err = abs(clock.dot - target); - if (this_err < err) { - *best_clock = clock; - err = this_err; - } - } - } - } - - return err != target; -} - -static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, - int refclk, - struct cdv_intel_clock_t *best_clock) -{ - struct cdv_intel_clock_t clock; + struct gma_clock_t clock; if (refclk == 27000) { if (target < 200000) { clock.p1 = 2; @@ -584,7 +448,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct clock.p = clock.p1 * clock.p2; clock.vco = (refclk * clock.m) / clock.n; clock.dot = clock.vco / clock.p; - memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); + memcpy(best_clock, &clock, sizeof(struct gma_clock_t)); return true; } @@ -1035,14 +899,14 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, int pipe = psb_intel_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk; - struct cdv_intel_clock_t clock; + struct gma_clock_t clock; u32 dpll = 0, dspcntr, pipeconf; bool ok; bool is_crt = false, is_lvds = false, is_tv = false; bool is_hdmi = false, is_dp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; - const struct cdv_intel_limit_t *limit; + const struct gma_limit_t *limit; u32 ddi_select = 0; bool is_edp = false; @@ -1108,12 +972,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, drm_mode_debug_printmodeline(adjusted_mode); - limit = cdv_intel_limit(crtc, refclk); + limit = psb_intel_crtc->clock_funcs->limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { - dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); + DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d", + adjusted_mode->clock, clock.dot); return 0; } @@ -1612,7 +1477,7 @@ static int cdv_crtc_set_config(struct drm_mode_set *set) /* FIXME: why are we using this, should it be cdv_ in this tree ? */ -static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock) +static void i8xx_clock(int refclk, struct gma_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; @@ -1630,7 +1495,7 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev, const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 dpll; u32 fp; - struct cdv_intel_clock_t clock; + struct gma_clock_t clock; bool is_lvds; struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; @@ -1788,3 +1653,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = { .set_config = cdv_crtc_set_config, .destroy = cdv_intel_crtc_destroy, }; + +const struct gma_clock_funcs cdv_clock_funcs = { + .clock = cdv_intel_clock, + .limit = cdv_intel_limit, + .pll_is_valid = gma_pll_is_valid, +}; -- cgit v1.2.3 From fe477cc1b09ecd957c8c201b4f9c84e9d03621d4 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Tue, 2 Jul 2013 17:02:22 +0200 Subject: drm/gma500: Make use of gma_pipe_has_type() Replace any use of xxx_intel_pipe_has_type() with the generic gma_pipe_has_type() function. 
Poulsbo still use it but that will be removed when we rip out psb_intel_pipe_has_type(). Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 8 ++++---- drivers/gpu/drm/gma500/mdfld_intel_display.c | 8 ++++---- drivers/gpu/drm/gma500/oaktrail_crtc.c | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 82f1ae46e0ac..fe6c6594eb19 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -26,7 +26,7 @@ #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" -#include "psb_intel_display.h" +#include "gma_display.h" #include "power.h" #include "cdv_device.h" @@ -375,7 +375,7 @@ static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc, int refclk) { const struct gma_limit_t *limit; - if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { /* * Now only single-channel LVDS is supported on CDV. If it is * incorrect, please add the dual-channel LVDS. @@ -384,8 +384,8 @@ static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc, limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; else limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; - } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { + } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { if (refclk == 27000) limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; else diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 74485dc43945..aa6528dae072 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -23,7 +23,7 @@ #include #include "psb_intel_reg.h" -#include "psb_intel_display.h" +#include "gma_display.h" #include "framebuffer.h" #include "mdfld_output.h" #include "mdfld_dsi_output.h" @@ -611,8 +611,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) - || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { + if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) + || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; else if (ksel == KSEL_BYPASS_25) @@ -624,7 +624,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc) (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; - } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { + } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; else if (ksel == KSEL_BYPASS_25) diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 3071526bc3c1..75567ee8a519 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -23,7 +23,7 @@ #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" -#include "psb_intel_display.h" +#include "gma_display.h" #include "power.h" struct psb_intel_range_t { @@ -88,8 +88,8 @@ static const struct oaktrail_limit_t 
*oaktrail_limit(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { + if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) + || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { switch (dev_priv->core_freq) { case 100: limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; -- cgit v1.2.3 From 7f67c06721641df12ed68249218d1c2118517f78 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Tue, 2 Jul 2013 17:07:59 +0200 Subject: drm/gma500/psb: Make use of generic clock code Add chip specific callbacks for the generic and non-generic clock calculation code. Also remove as much dupilicated code as possible. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_device.c | 3 +- drivers/gpu/drm/gma500/psb_device.h | 24 ++++ drivers/gpu/drm/gma500/psb_intel_display.c | 189 ++++------------------------- drivers/gpu/drm/gma500/psb_intel_display.h | 2 - 4 files changed, 51 insertions(+), 167 deletions(-) create mode 100644 drivers/gpu/drm/gma500/psb_device.h diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index f6f534b4197e..697678619bd1 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -25,7 +25,7 @@ #include "psb_reg.h" #include "psb_intel_reg.h" #include "intel_bios.h" - +#include "psb_device.h" static int psb_output_init(struct drm_device *dev) { @@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = { .crtc_helper = &psb_intel_helper_funcs, .crtc_funcs = &psb_intel_crtc_funcs, + .clock_funcs = &psb_clock_funcs, .output_init = psb_output_init, diff --git a/drivers/gpu/drm/gma500/psb_device.h b/drivers/gpu/drm/gma500/psb_device.h new file mode 100644 index 000000000000..35e304c7f85a --- /dev/null +++ b/drivers/gpu/drm/gma500/psb_device.h @@ -0,0 +1,24 @@ +/* + * Copyright © 2013 Patrik Jakobsson + * Copyright © 2011 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef _PSB_DEVICE_H_ +#define _PSB_DEVICE_H_ + +extern const struct gma_clock_funcs psb_clock_funcs; + +#endif diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 0f1d069afa11..89be7a3632ef 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -26,39 +26,13 @@ #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" -#include "psb_intel_display.h" +#include "gma_display.h" #include "power.h" -struct psb_intel_clock_t { - /* given values */ - int n; - int m1, m2; - int p1, p2; - /* derived values */ - int dot; - int vco; - int m; - int p; -}; - -struct psb_intel_range_t { - int min, max; -}; - -struct psb_intel_p2_t { - int dot_limit; - int p2_slow, p2_fast; -}; - -struct psb_intel_limit_t { - struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; - struct psb_intel_p2_t p2; -}; - #define INTEL_LIMIT_I9XX_SDVO_DAC 0 #define INTEL_LIMIT_I9XX_LVDS 1 -static const struct psb_intel_limit_t psb_intel_limits[] = { +static const struct gma_limit_t psb_intel_limits[] = { { /* INTEL_LIMIT_I9XX_SDVO_DAC */ .dot = {.min = 20000, .max = 400000}, .vco = {.min = 1400000, .max = 2800000}, @@ -68,8 +42,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = { .m2 = {.min = 3, .max = 7}, .p = {.min = 5, .max = 80}, .p1 = {.min = 1, .max = 8}, - .p2 = {.dot_limit = 200000, - .p2_slow = 10, .p2_fast = 5}, + .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5}, + .find_pll = gma_find_best_pll, }, { /* INTEL_LIMIT_I9XX_LVDS */ .dot = {.min = 20000, .max = 400000}, @@ -83,23 +57,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = { /* The single-channel range is 25-112Mhz, and dual-channel * is 80-224Mhz. Prefer single channel as much as possible. 
*/ - .p2 = {.dot_limit = 112000, - .p2_slow = 14, .p2_fast = 7}, + .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7}, + .find_pll = gma_find_best_pll, }, }; -static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) +static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc, + int refclk) { - const struct psb_intel_limit_t *limit; + const struct gma_limit_t *limit; - if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; else limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; return limit; } -static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock) +static void psb_intel_clock(int refclk, struct gma_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; @@ -107,130 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock) clock->dot = clock->vco / clock->p; } -/** - * Returns whether any output on the specified pipe is of the specified type - */ -bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *l_entry; - - list_for_each_entry(l_entry, &mode_config->connector_list, head) { - if (l_entry->encoder && l_entry->encoder->crtc == crtc) { - struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(l_entry); - if (psb_intel_encoder->type == type) - return true; - } - } - return false; -} - -#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } -/** - * Returns whether the given set of divisors are valid for a given refclk with - * the given connectors. - */ - -static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc, - struct psb_intel_clock_t *clock) -{ - const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); - - if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) - INTELPllInvalid("p1 out of range\n"); - if (clock->p < limit->p.min || limit->p.max < clock->p) - INTELPllInvalid("p out of range\n"); - if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) - INTELPllInvalid("m2 out of range\n"); - if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) - INTELPllInvalid("m1 out of range\n"); - if (clock->m1 <= clock->m2) - INTELPllInvalid("m1 <= m2\n"); - if (clock->m < limit->m.min || limit->m.max < clock->m) - INTELPllInvalid("m out of range\n"); - if (clock->n < limit->n.min || limit->n.max < clock->n) - INTELPllInvalid("n out of range\n"); - if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) - INTELPllInvalid("vco out of range\n"); - /* XXX: We may need to be checking "Dot clock" - * depending on the multiplier, connector, etc., - * rather than just a single range. - */ - if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) - INTELPllInvalid("dot out of range\n"); - - return true; -} - -/** - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. The returned values represent the clock equation: - * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
- */ -static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target, - int refclk, - struct psb_intel_clock_t *best_clock) -{ - struct drm_device *dev = crtc->dev; - struct psb_intel_clock_t clock; - const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); - int err = target; - - if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && - (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { - /* - * For LVDS, if the panel is on, just rely on its current - * settings for dual-channel. We haven't figured out how to - * reliably set up different single/dual channel state, if we - * even can. - */ - if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) - clock.p2 = limit->p2.p2_fast; - else - clock.p2 = limit->p2.p2_slow; - } else { - if (target < limit->p2.dot_limit) - clock.p2 = limit->p2.p2_slow; - else - clock.p2 = limit->p2.p2_fast; - } - - memset(best_clock, 0, sizeof(*best_clock)); - - for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; - clock.m1++) { - for (clock.m2 = limit->m2.min; - clock.m2 < clock.m1 && clock.m2 <= limit->m2.max; - clock.m2++) { - for (clock.n = limit->n.min; - clock.n <= limit->n.max; clock.n++) { - for (clock.p1 = limit->p1.min; - clock.p1 <= limit->p1.max; - clock.p1++) { - int this_err; - - psb_intel_clock(refclk, &clock); - - if (!psb_intel_PLL_is_valid - (crtc, &clock)) - continue; - - this_err = abs(clock.dot - target); - if (this_err < err) { - *best_clock = clock; - err = this_err; - } - } - } - } - } - - return err != target; -} - void psb_intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. */ @@ -484,12 +335,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, int pipe = psb_intel_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk; - struct psb_intel_clock_t clock; + struct gma_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false; bool is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + const struct gma_limit_t *limit; /* No scan out no play */ if (crtc->fb == NULL) { @@ -520,10 +372,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, refclk = 96000; - ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, + limit = psb_intel_crtc->clock_funcs->limit(crtc, refclk); + + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { - dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); + DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d", + adjusted_mode->clock, clock.dot); return 0; } @@ -1022,7 +877,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev, const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 dpll; u32 fp; - struct psb_intel_clock_t clock; + struct gma_clock_t clock; bool is_lvds; struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; @@ -1190,6 +1045,12 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = { .destroy = psb_intel_crtc_destroy, }; +const struct gma_clock_funcs psb_clock_funcs = { + .clock = psb_intel_clock, + .limit = psb_intel_limit, + .pll_is_valid = gma_pll_is_valid, +}; + /* * Set the default value of cursor control and base register * to zero. 
This is a workaround for h/w defect on Oaktrail diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h index 3724b971e91c..6e9007d86f72 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.h +++ b/drivers/gpu/drm/gma500/psb_intel_display.h @@ -20,6 +20,4 @@ #ifndef _INTEL_DISPLAY_H_ #define _INTEL_DISPLAY_H_ -bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); - #endif -- cgit v1.2.3 From f0e9d89b9b7f3c4b1d21ca2e0d25e3ebe5d2a1d2 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Mon, 1 Jul 2013 23:14:58 +0200 Subject: drm/gma500: Remove the unused psb_intel_display.h Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.h | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 drivers/gpu/drm/gma500/psb_intel_display.h diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h deleted file mode 100644 index 6e9007d86f72..000000000000 --- a/drivers/gpu/drm/gma500/psb_intel_display.h +++ /dev/null @@ -1,23 +0,0 @@ -/* copyright (c) 2008, Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Authors: - * Eric Anholt - */ - -#ifndef _INTEL_DISPLAY_H_ -#define _INTEL_DISPLAY_H_ - -#endif -- cgit v1.2.3 From 2eff0b3359c097bbcfe4850dfdf9c94e514ddfee Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Fri, 5 Jul 2013 16:41:49 +0200 Subject: drm/gma500: Add generic pipe/crtc functions Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/gma_display.c | 326 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/gma500/gma_display.h | 14 ++ 2 files changed, 340 insertions(+) diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 8f66d5c6505b..297937d12fb9 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -24,6 +24,7 @@ #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_drv.h" +#include "framebuffer.h" /** * Returns whether any output on the specified pipe is of the specified type @@ -46,6 +47,331 @@ bool gma_pipe_has_type(struct drm_crtc *crtc, int type) return false; } +void gma_wait_for_vblank(struct drm_device *dev) +{ + /* Wait for 20ms, i.e. one cycle at 50hz. 
*/ + mdelay(20); +} + +int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); + int pipe = psb_intel_crtc->pipe; + const struct psb_offset *map = &dev_priv->regmap[pipe]; + unsigned long start, offset; + u32 dspcntr; + int ret = 0; + + if (!gma_power_begin(dev, true)) + return 0; + + /* no fb bound */ + if (!crtc->fb) { + dev_err(dev->dev, "No FB bound\n"); + goto gma_pipe_cleaner; + } + + /* We are displaying this buffer, make sure it is actually loaded + into the GTT */ + ret = psb_gtt_pin(psbfb->gtt); + if (ret < 0) + goto gma_pipe_set_base_exit; + start = psbfb->gtt->offset; + offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); + + REG_WRITE(map->stride, crtc->fb->pitches[0]); + + dspcntr = REG_READ(map->cntr); + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; + + switch (crtc->fb->bits_per_pixel) { + case 8: + dspcntr |= DISPPLANE_8BPP; + break; + case 16: + if (crtc->fb->depth == 15) + dspcntr |= DISPPLANE_15_16BPP; + else + dspcntr |= DISPPLANE_16BPP; + break; + case 24: + case 32: + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; + break; + default: + dev_err(dev->dev, "Unknown color depth\n"); + ret = -EINVAL; + goto gma_pipe_set_base_exit; + } + REG_WRITE(map->cntr, dspcntr); + + dev_dbg(dev->dev, + "Writing base %08lX %08lX %d %d\n", start, offset, x, y); + + /* FIXME: Investigate whether this really is the base for psb and why + the linear offset is named base for the other chips. map->surf + should be the base and map->linoff the offset for all chips */ + if (IS_PSB(dev)) { + REG_WRITE(map->base, offset + start); + REG_READ(map->base); + } else { + REG_WRITE(map->base, offset); + REG_READ(map->base); + REG_WRITE(map->surf, start); + REG_READ(map->surf); + } + +gma_pipe_cleaner: + /* If there was a previous display we can now unpin it */ + if (old_fb) + psb_gtt_unpin(to_psb_fb(old_fb)->gtt); + +gma_pipe_set_base_exit: + gma_power_end(dev); + return ret; +} + +/* Loads the palette/gamma unit for the CRTC with the prepared values */ +void gma_crtc_load_lut(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + int palreg = map->palette; + int i; + + /* The clocks have to be on to load the palette. */ + if (!crtc->enabled) + return; + + if (gma_power_begin(dev, false)) { + for (i = 0; i < 256; i++) { + REG_WRITE(palreg + 4 * i, + ((psb_intel_crtc->lut_r[i] + + psb_intel_crtc->lut_adj[i]) << 16) | + ((psb_intel_crtc->lut_g[i] + + psb_intel_crtc->lut_adj[i]) << 8) | + (psb_intel_crtc->lut_b[i] + + psb_intel_crtc->lut_adj[i])); + } + gma_power_end(dev); + } else { + for (i = 0; i < 256; i++) { + /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */ + dev_priv->regs.pipe[0].palette[i] = + ((psb_intel_crtc->lut_r[i] + + psb_intel_crtc->lut_adj[i]) << 16) | + ((psb_intel_crtc->lut_g[i] + + psb_intel_crtc->lut_adj[i]) << 8) | + (psb_intel_crtc->lut_b[i] + + psb_intel_crtc->lut_adj[i]); + } + + } +} + +void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, + u32 start, u32 size) +{ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int i; + int end = (start + size > 256) ? 
256 : start + size; + + for (i = start; i < end; i++) { + psb_intel_crtc->lut_r[i] = red[i] >> 8; + psb_intel_crtc->lut_g[i] = green[i] >> 8; + psb_intel_crtc->lut_b[i] = blue[i] >> 8; + } + + gma_crtc_load_lut(crtc); +} + +/** + * Sets the power management mode of the pipe and plane. + * + * This code should probably grow support for turning the cursor off and back + * on appropriately at the same time as we're turning the pipe off/on. + */ +void gma_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + const struct psb_offset *map = &dev_priv->regmap[pipe]; + u32 temp; + + /* XXX: When our outputs are all unaware of DPMS modes other than off + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. + */ + + /* FIXME: Uncomment this when we move cdv to generic dpms + if (IS_CDV(dev)) + cdv_intel_disable_self_refresh(dev); + */ + + switch (mode) { + case DRM_MODE_DPMS_ON: + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + if (psb_intel_crtc->active) + break; + + psb_intel_crtc->active = true; + + /* Enable the DPLL */ + temp = REG_READ(map->dpll); + if ((temp & DPLL_VCO_ENABLE) == 0) { + REG_WRITE(map->dpll, temp); + REG_READ(map->dpll); + /* Wait for the clocks to stabilize. */ + udelay(150); + REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); + REG_READ(map->dpll); + /* Wait for the clocks to stabilize. */ + udelay(150); + REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); + REG_READ(map->dpll); + /* Wait for the clocks to stabilize. */ + udelay(150); + } + + /* Enable the plane */ + temp = REG_READ(map->cntr); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + REG_WRITE(map->cntr, + temp | DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + REG_WRITE(map->base, REG_READ(map->base)); + } + + udelay(150); + + /* Enable the pipe */ + temp = REG_READ(map->conf); + if ((temp & PIPEACONF_ENABLE) == 0) + REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); + + temp = REG_READ(map->status); + temp &= ~(0xFFFF); + temp |= PIPE_FIFO_UNDERRUN; + REG_WRITE(map->status, temp); + REG_READ(map->status); + + gma_crtc_load_lut(crtc); + + /* Give the overlay scaler a chance to enable + * if it's on this pipe */ + /* psb_intel_crtc_dpms_video(crtc, true); TODO */ + break; + case DRM_MODE_DPMS_OFF: + if (!psb_intel_crtc->active) + break; + + psb_intel_crtc->active = false; + + /* Give the overlay scaler a chance to disable + * if it's on this pipe */ + /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ + + /* Disable the VGA plane that we never use */ + REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); + + /* Turn off vblank interrupts */ + drm_vblank_off(dev, pipe); + + /* Wait for vblank for the disable to take effect */ + gma_wait_for_vblank(dev); + + /* Disable plane */ + temp = REG_READ(map->cntr); + if ((temp & DISPLAY_PLANE_ENABLE) != 0) { + REG_WRITE(map->cntr, + temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + REG_WRITE(map->base, REG_READ(map->base)); + REG_READ(map->base); + } + + /* Disable pipe */ + temp = REG_READ(map->conf); + if ((temp & PIPEACONF_ENABLE) != 0) { + REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); + REG_READ(map->conf); + } + + /* Wait for vblank for the disable to take effect. 
*/ + gma_wait_for_vblank(dev); + + udelay(150); + + /* Disable DPLL */ + temp = REG_READ(map->dpll); + if ((temp & DPLL_VCO_ENABLE) != 0) { + REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE); + REG_READ(map->dpll); + } + + /* Wait for the clocks to turn off. */ + udelay(150); + break; + } + + /* FIXME: Uncomment this when we move cdv to generic dpms + if (IS_CDV(dev)) + cdv_intel_update_watermark(dev, crtc); + */ + + /* Set FIFO watermarks */ + REG_WRITE(DSPARB, 0x3F3E); +} + +bool gma_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +void gma_crtc_prepare(struct drm_crtc *crtc) +{ + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); +} + +void gma_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); +} + +void gma_crtc_disable(struct drm_crtc *crtc) +{ + struct gtt_range *gt; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (crtc->fb) { + gt = to_psb_fb(crtc->fb)->gtt; + psb_gtt_unpin(gt); + } +} + +void gma_crtc_destroy(struct drm_crtc *crtc) +{ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + + kfree(psb_intel_crtc->crtc_state); + drm_crtc_cleanup(crtc); + kfree(psb_intel_crtc); +} + #define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; } bool gma_pll_is_valid(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index a5d8aa31b5b7..24a582e009e6 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -61,6 +61,20 @@ struct gma_clock_funcs { /* Common pipe related functions */ extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type); +extern void gma_wait_for_vblank(struct drm_device *dev); +extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); +extern void gma_crtc_load_lut(struct drm_crtc *crtc); +extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, u32 start, u32 size); +extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); +extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +extern void gma_crtc_prepare(struct drm_crtc *crtc); +extern void gma_crtc_commit(struct drm_crtc *crtc); +extern void gma_crtc_disable(struct drm_crtc *crtc); +extern void gma_crtc_destroy(struct drm_crtc *crtc); /* Common clock related functions */ extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk); -- cgit v1.2.3 From ad3c46eae3f51b34adea55e0625d255b21ec0a15 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Tue, 9 Jul 2013 20:03:01 +0200 Subject: drm/gma500/cdv: Use identical generic crtc funcs This patch makes cdv use the gma_xxx counterparts that are identical. I took them in one sweep as they should not cause any regressions. 
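For reference, the swapped-in helpers reach the DRM core through the existing psb_ops indirection, so no new plumbing is needed. A rough, illustrative sketch of that wiring (not an additional change in this patch; the drm_crtc_init() half already appears in the crtc init shown earlier in this series):

        /* Per-chip vtables are handed to the DRM core at crtc init time.
         * After this patch the CDV tables point at the shared gma_* hooks
         * for mode_fixup, prepare, commit, disable and destroy. */
        drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
        drm_crtc_helper_add(&psb_intel_crtc->base, dev_priv->ops->crtc_helper);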
Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_device.h | 9 ---- drivers/gpu/drm/gma500/cdv_intel_display.c | 74 +++++++----------------------- 2 files changed, 16 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 0fcb608bbdf9..602406bb6d0f 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h @@ -26,12 +26,3 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device * int reg); extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); - -static inline void cdv_intel_wait_for_vblank(struct drm_device *dev) -{ - /* Wait for 20ms, i.e. one cycle at 50hz. */ - /* FIXME: msleep ?? */ - mdelay(20); -} - - diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index fe6c6594eb19..ae57b93bdadf 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -595,7 +595,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev) REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN)); REG_READ(FW_BLC_SELF); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); /* Cedarview workaround to write ovelay plane, which force to leave * MAX_FIFO state. @@ -603,7 +603,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev) REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/); REG_READ(OV_OVADD); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); } } @@ -644,12 +644,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc REG_WRITE(DSPFW6, 0x10); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); /* enable self-refresh for single pipe active */ REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); REG_READ(FW_BLC_SELF); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); } else { @@ -661,7 +661,7 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc REG_WRITE(DSPFW5, 0x01010101); REG_WRITE(DSPFW6, 0x1d0); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); cdv_intel_disable_self_refresh(dev); @@ -812,7 +812,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) drm_vblank_off(dev, pipe); /* Wait for vblank for the disable to take effect */ - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); /* Next, disable display pipes */ temp = REG_READ(map->conf); @@ -822,7 +822,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Wait for vblank for the disable to take effect. 
*/ - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); udelay(150); @@ -851,26 +851,6 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(DSPARB, 0x3F3E); } -static void cdv_intel_crtc_prepare(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void cdv_intel_crtc_commit(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -} - -static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1129,7 +1109,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->conf, pipeconf); REG_READ(map->conf); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(map->cntr, dspcntr); @@ -1140,7 +1120,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, crtc_funcs->mode_set_base(crtc, x, y, old_fb); } - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); return 0; } @@ -1301,12 +1281,12 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc) REG_WRITE(map->base, crtc_state->saveDSPBASE); REG_WRITE(map->conf, crtc_state->savePIPECONF); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(map->cntr, crtc_state->saveDSPCNTR); REG_WRITE(map->base, crtc_state->saveDSPBASE); - cdv_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); paletteReg = map->palette; for (i = 0; i < 256; ++i) @@ -1612,36 +1592,14 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, return mode; } -static void cdv_intel_crtc_destroy(struct drm_crtc *crtc) -{ - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - - kfree(psb_intel_crtc->crtc_state); - drm_crtc_cleanup(crtc); - kfree(psb_intel_crtc); -} - -static void cdv_intel_crtc_disable(struct drm_crtc *crtc) -{ - struct gtt_range *gt; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - - if (crtc->fb) { - gt = to_psb_fb(crtc->fb)->gtt; - psb_gtt_unpin(gt); - } -} - const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .dpms = cdv_intel_crtc_dpms, - .mode_fixup = cdv_intel_crtc_mode_fixup, + .mode_fixup = gma_crtc_mode_fixup, .mode_set = cdv_intel_crtc_mode_set, .mode_set_base = cdv_intel_pipe_set_base, - .prepare = cdv_intel_crtc_prepare, - .commit = cdv_intel_crtc_commit, - .disable = cdv_intel_crtc_disable, + .prepare = gma_crtc_prepare, + .commit = gma_crtc_commit, + .disable = gma_crtc_disable, }; const struct drm_crtc_funcs cdv_intel_crtc_funcs = { @@ -1651,7 +1609,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = { .cursor_move = cdv_intel_crtc_cursor_move, .gamma_set = cdv_intel_crtc_gamma_set, .set_config = cdv_crtc_set_config, - .destroy = cdv_intel_crtc_destroy, + .destroy = gma_crtc_destroy, }; const struct gma_clock_funcs cdv_clock_funcs = { -- cgit v1.2.3 From d1fa08f3bacb6fc9a7642c85a4fa8976a3f1afac Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 01:20:19 +0200 Subject: drm/gma500: Make all chips use gma_wait_for_vblank Also remove the duplicated oaktrail function. 
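The shared helper keeps the existing 20 ms busy-wait; the removed CDV copy carried a "FIXME: msleep ??" note. A sleeping variant would look like the sketch below, but it is hypothetical, not part of this series, and only safe in sleepable context:

        #include <linux/delay.h>

        /* Hypothetical alternative to the mdelay(20) busy-wait: sleep for
         * roughly one cycle at 50 Hz instead. Not part of this patch set. */
        static void gma_wait_for_vblank_sleeping(struct drm_device *dev)
        {
                msleep(20);
        }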
Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_dp.c | 3 ++- drivers/gpu/drm/gma500/mdfld_intel_display.c | 6 +++--- drivers/gpu/drm/gma500/oaktrail_crtc.c | 6 +++--- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 12 +++--------- drivers/gpu/drm/gma500/psb_intel_display.c | 16 +++++----------- drivers/gpu/drm/gma500/psb_intel_drv.h | 1 - drivers/gpu/drm/gma500/psb_intel_sdvo.c | 2 +- 7 files changed, 17 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 88d9ef6b5b4a..839ab83ff6ec 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -34,6 +34,7 @@ #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" +#include "gma_display.h" #include #define _wait_for(COND, MS, W) ({ \ @@ -1317,7 +1318,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) /* Enable output, wait for it to become active */ REG_WRITE(intel_dp->output_reg, reg); REG_READ(intel_dp->output_reg); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); DRM_DEBUG_KMS("Link config\n"); /* Write the link configuration data */ diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index aa6528dae072..da83fddbc9a8 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe) } /* FIXME JLIU7_PO */ - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); return; /* Wait for for the pipe disable to take effect. */ @@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) } /* FIXME JLIU7_PO */ - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); return; /* Wait for for the pipe enable to take effect. */ @@ -1034,7 +1034,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, /* Wait for for the pipe enable to take effect. */ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); mrst_crtc_mode_set_exit: diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 75567ee8a519..8af461f3a72d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) REG_READ(map->conf); } /* Wait for for the pipe disable to take effect. */ - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); temp = REG_READ(map->dpll); if ((temp & DPLL_VCO_ENABLE) != 0) { @@ -484,10 +484,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->conf, pipeconf); REG_READ(map->conf); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(map->cntr, dspcntr); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); oaktrail_crtc_mode_set_exit: gma_power_end(dev); diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index f036f1fc161e..d9013f70b019 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev) HDMI_READ(HDMI_HCR); } -static void wait_for_vblank(struct drm_device *dev) -{ - /* Wait for 20ms, i.e. one cycle at 50hz. 
*/ - mdelay(20); -} - static unsigned int htotal_calculate(struct drm_display_mode *mode) { u32 htotal, new_crtc_htotal; @@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, REG_WRITE(PCH_PIPEBCONF, pipeconf); REG_READ(PCH_PIPEBCONF); - wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(dspcntr_reg, dspcntr); - wait_for_vblank(dev); + gma_wait_for_vblank(dev); gma_power_end(dev); @@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode) REG_READ(PCH_PIPEBCONF); } - wait_for_vblank(dev); + gma_wait_for_vblank(dev); /* Enable plane */ temp = REG_READ(DSPBCNTR); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 89be7a3632ef..fa57864f210d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -82,12 +82,6 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock) clock->dot = clock->vco / clock->p; } -void psb_intel_wait_for_vblank(struct drm_device *dev) -{ - /* Wait for 20ms, i.e. one cycle at 50hz. */ - mdelay(20); -} - static int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -244,7 +238,7 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Wait for vblank for the disable to take effect. */ - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); temp = REG_READ(map->dpll); if ((temp & DPLL_VCO_ENABLE) != 0) { @@ -516,14 +510,14 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->conf, pipeconf); REG_READ(map->conf); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(map->cntr, dspcntr); /* Flush the plane changes */ crtc_funcs->mode_set_base(crtc, x, y, old_fb); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); return 0; } @@ -669,12 +663,12 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc) REG_WRITE(map->base, crtc_state->saveDSPBASE); REG_WRITE(map->conf, crtc_state->savePIPECONF); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); REG_WRITE(map->cntr, crtc_state->saveDSPCNTR); REG_WRITE(map->base, crtc_state->saveDSPBASE); - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); paletteReg = map->palette; for (i = 0; i < 256; ++i) diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index bfe0408c1291..596850210a51 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -246,7 +246,6 @@ extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); -extern void psb_intel_wait_for_vblank(struct drm_device *dev); extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 19e36603b23b..e3d1078ecf09 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1121,7 +1121,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) if ((temp & SDVO_ENABLE) == 0) psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) - psb_intel_wait_for_vblank(dev); + gma_wait_for_vblank(dev); status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2); 
/* Warn if the device reported failure to sync. -- cgit v1.2.3 From 4855177ed0d94621eaf1c6bec64f16318a8be5fe Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 17:40:54 +0200 Subject: drm/gma500/psb: Use identical generic crtc funcs This patch makes psb use the gma_xxx counterparts that are identical. I took them in one sweep as they should not cause any regressions. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 41 +++--------------------------- 1 file changed, 4 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index fa57864f210d..0642e7d37fa8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -255,18 +255,6 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(DSPARB, 0x3F3E); } -static void psb_intel_crtc_prepare(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void psb_intel_crtc_commit(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -} - void psb_intel_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = @@ -291,14 +279,6 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder) kfree(intel_encoder); } -static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1006,27 +986,14 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } -static void psb_intel_crtc_disable(struct drm_crtc *crtc) -{ - struct gtt_range *gt; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - - if (crtc->fb) { - gt = to_psb_fb(crtc->fb)->gtt; - psb_gtt_unpin(gt); - } -} - const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, - .mode_fixup = psb_intel_crtc_mode_fixup, + .mode_fixup = gma_crtc_mode_fixup, .mode_set = psb_intel_crtc_mode_set, .mode_set_base = psb_intel_pipe_set_base, - .prepare = psb_intel_crtc_prepare, - .commit = psb_intel_crtc_commit, - .disable = psb_intel_crtc_disable, + .prepare = gma_crtc_prepare, + .commit = gma_crtc_commit, + .disable = gma_crtc_disable, }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { -- cgit v1.2.3 From 3c447166536c80209f0dcb300cdffd76686187aa Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 17:58:04 +0200 Subject: drm/gma500/cdv: Convert to gma_pipe_set_base() Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 77 +----------------------------- 1 file changed, 1 insertion(+), 76 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index ae57b93bdadf..9eee57b3cc59 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -452,81 +452,6 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, return true; } -static int cdv_intel_pipe_set_base(struct drm_crtc *crtc, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv 
= dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); - int pipe = psb_intel_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - unsigned long start, offset; - u32 dspcntr; - int ret = 0; - - if (!gma_power_begin(dev, true)) - return 0; - - /* no fb bound */ - if (!crtc->fb) { - dev_err(dev->dev, "No FB bound\n"); - goto psb_intel_pipe_cleaner; - } - - - /* We are displaying this buffer, make sure it is actually loaded - into the GTT */ - ret = psb_gtt_pin(psbfb->gtt); - if (ret < 0) - goto psb_intel_pipe_set_base_exit; - start = psbfb->gtt->offset; - offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); - - REG_WRITE(map->stride, crtc->fb->pitches[0]); - - dspcntr = REG_READ(map->cntr); - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - - switch (crtc->fb->bits_per_pixel) { - case 8: - dspcntr |= DISPPLANE_8BPP; - break; - case 16: - if (crtc->fb->depth == 15) - dspcntr |= DISPPLANE_15_16BPP; - else - dspcntr |= DISPPLANE_16BPP; - break; - case 24: - case 32: - dspcntr |= DISPPLANE_32BPP_NO_ALPHA; - break; - default: - dev_err(dev->dev, "Unknown color depth\n"); - ret = -EINVAL; - goto psb_intel_pipe_set_base_exit; - } - REG_WRITE(map->cntr, dspcntr); - - dev_dbg(dev->dev, - "Writing base %08lX %08lX %d %d\n", start, offset, x, y); - - REG_WRITE(map->base, offset); - REG_READ(map->base); - REG_WRITE(map->surf, start); - REG_READ(map->surf); - -psb_intel_pipe_cleaner: - /* If there was a previous display we can now unpin it */ - if (old_fb) - psb_gtt_unpin(to_psb_fb(old_fb)->gtt); - -psb_intel_pipe_set_base_exit: - gma_power_end(dev); - return ret; -} - #define FIFO_PIPEA (1 << 0) #define FIFO_PIPEB (1 << 1) @@ -1596,7 +1521,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .dpms = cdv_intel_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, .mode_set = cdv_intel_crtc_mode_set, - .mode_set_base = cdv_intel_pipe_set_base, + .mode_set_base = gma_pipe_set_base, .prepare = gma_crtc_prepare, .commit = gma_crtc_commit, .disable = gma_crtc_disable, -- cgit v1.2.3 From b8e5ec9f306744e19357580a3cf47452fe64b27a Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:02:04 +0200 Subject: drm/gma500: Add IS_CDV() macro This macro is needed for Cedarview specific stuff in the generic gma functions. 
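For context, the intended consumers are the Cedarview-only calls in the shared gma_crtc_dpms(), which are still commented out at this point in the series (self-refresh on entry, watermark update on exit). A minimal usage sketch, illustrative only; cdv_intel_disable_self_refresh() and cdv_intel_update_watermark() are made non-static by a later patch in this series:

        /* Gate CDV-specific behaviour inside the generic DPMS helper once
         * Cedarview is converted to it: disable self-refresh before
         * touching the pipe, recompute watermarks afterwards. */
        if (IS_CDV(dev))
                cdv_intel_disable_self_refresh(dev);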
Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_drv.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index eeed88c3c37e..ed1e567b7e3b 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -47,6 +47,7 @@ enum { #define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) #define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) #define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) +#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0) /* * Driver definitions -- cgit v1.2.3 From 7ea03f069572fef5701b8be90aed1cfd0b64d76e Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:12:11 +0200 Subject: drm/gma500/cdv: Convert to gma_crtc_dpms() Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 137 +---------------------------- drivers/gpu/drm/gma500/gma_display.c | 4 - drivers/gpu/drm/gma500/gma_display.h | 5 ++ 3 files changed, 8 insertions(+), 138 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 9eee57b3cc59..1160175f16f9 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -512,7 +512,7 @@ static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) return false; } -static void cdv_intel_disable_self_refresh (struct drm_device *dev) +void cdv_intel_disable_self_refresh(struct drm_device *dev) { if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { @@ -533,7 +533,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev) } -static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc) +void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc) { if (cdv_intel_single_pipe_active(dev)) { @@ -645,137 +645,6 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc) } } -/** - * Sets the power management mode of the pipe and plane. - * - * This code should probably grow support for turning the cursor off and back - * on appropriately at the same time as we're turning the pipe off/on. - */ -static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - u32 temp; - - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. - */ - cdv_intel_disable_self_refresh(dev); - - switch (mode) { - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - if (psb_intel_crtc->active) - break; - - psb_intel_crtc->active = true; - - /* Enable the DPLL */ - temp = REG_READ(map->dpll); - if ((temp & DPLL_VCO_ENABLE) == 0) { - REG_WRITE(map->dpll, temp); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. */ - udelay(150); - REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. */ - udelay(150); - REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. 
*/ - udelay(150); - } - - /* Jim Bish - switch plan and pipe per scott */ - /* Enable the plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - REG_WRITE(map->cntr, - temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - } - - udelay(150); - - /* Enable the pipe */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) == 0) - REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); - - temp = REG_READ(map->status); - temp &= ~(0xFFFF); - temp |= PIPE_FIFO_UNDERRUN; - REG_WRITE(map->status, temp); - REG_READ(map->status); - - cdv_intel_crtc_load_lut(crtc); - - /* Give the overlay scaler a chance to enable - * if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, true); TODO */ - break; - case DRM_MODE_DPMS_OFF: - if (!psb_intel_crtc->active) - break; - - psb_intel_crtc->active = false; - - /* Give the overlay scaler a chance to disable - * if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Jim Bish - changed pipe/plane here as well. */ - - drm_vblank_off(dev, pipe); - /* Wait for vblank for the disable to take effect */ - gma_wait_for_vblank(dev); - - /* Next, disable display pipes */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); - REG_READ(map->conf); - } - - /* Wait for vblank for the disable to take effect. */ - gma_wait_for_vblank(dev); - - udelay(150); - - /* Disable display plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(map->cntr, - temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - REG_READ(map->base); - } - - temp = REG_READ(map->dpll); - if ((temp & DPLL_VCO_ENABLE) != 0) { - REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE); - REG_READ(map->dpll); - } - - /* Wait for the clocks to turn off. */ - udelay(150); - break; - } - cdv_intel_update_watermark(dev, crtc); - /*Set FIFO Watermarks*/ - REG_WRITE(DSPARB, 0x3F3E); -} - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1518,7 +1387,7 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, } const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { - .dpms = cdv_intel_crtc_dpms, + .dpms = gma_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, .mode_set = cdv_intel_crtc_mode_set, .mode_set_base = gma_pipe_set_base, diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 297937d12fb9..7a0888a64a33 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -210,10 +210,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
*/ - /* FIXME: Uncomment this when we move cdv to generic dpms if (IS_CDV(dev)) cdv_intel_disable_self_refresh(dev); - */ switch (mode) { case DRM_MODE_DPMS_ON: @@ -322,10 +320,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) break; } - /* FIXME: Uncomment this when we move cdv to generic dpms if (IS_CDV(dev)) cdv_intel_update_watermark(dev, crtc); - */ /* Set FIFO watermarks */ REG_WRITE(DSPARB, 0x3F3E); diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 24a582e009e6..665164d41224 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -85,4 +85,9 @@ extern bool gma_pll_is_valid(struct drm_crtc *crtc, extern bool gma_find_best_pll(const struct gma_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, struct gma_clock_t *best_clock); + +/* Cedarview specific functions */ +extern void cdv_intel_disable_self_refresh(struct drm_device *dev); +extern void cdv_intel_update_watermark(struct drm_device *dev, + struct drm_crtc *crtc); #endif -- cgit v1.2.3 From a1f4efe4416dbd8d58486e60752f3a8145aa84c9 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:23:29 +0200 Subject: drm/gma500/cdv: Convert to generic gamma funcs There is a slight difference in how we pick the palette register in the generic function but we should be ok as long as psb_intel_crtc->pipe and the register map is sane. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 70 +----------------------------- 1 file changed, 1 insertion(+), 69 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 1160175f16f9..e30761a9bf83 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -593,58 +593,6 @@ void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc) } } -/** Loads the palette/gamma unit for the CRTC with the prepared values */ -static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int palreg = PALETTE_A; - int i; - - /* The clocks have to be on to load the palette. 
*/ - if (!crtc->enabled) - return; - - switch (psb_intel_crtc->pipe) { - case 0: - break; - case 1: - palreg = PALETTE_B; - break; - case 2: - palreg = PALETTE_C; - break; - default: - dev_err(dev->dev, "Illegal Pipe Number.\n"); - return; - } - - if (gma_power_begin(dev, false)) { - for (i = 0; i < 256; i++) { - REG_WRITE(palreg + 4 * i, - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i])); - } - gma_power_end(dev); - } else { - for (i = 0; i < 256; i++) { - dev_priv->regs.pipe[0].palette[i] = - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i]); - } - - } -} - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1213,22 +1161,6 @@ static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, - u16 *green, u16 *blue, uint32_t start, uint32_t size) -{ - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int i; - int end = (start + size > 256) ? 256 : start + size; - - for (i = start; i < end; i++) { - psb_intel_crtc->lut_r[i] = red[i] >> 8; - psb_intel_crtc->lut_g[i] = green[i] >> 8; - psb_intel_crtc->lut_b[i] = blue[i] >> 8; - } - - cdv_intel_crtc_load_lut(crtc); -} - static int cdv_crtc_set_config(struct drm_mode_set *set) { int ret = 0; @@ -1401,7 +1333,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = { .restore = cdv_intel_crtc_restore, .cursor_set = cdv_intel_crtc_cursor_set, .cursor_move = cdv_intel_crtc_cursor_move, - .gamma_set = cdv_intel_crtc_gamma_set, + .gamma_set = gma_crtc_gamma_set, .set_config = cdv_crtc_set_config, .destroy = gma_crtc_destroy, }; -- cgit v1.2.3 From 00b1fe7445d8a3cd81ba564fba5d15dcbe26f23b Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:37:03 +0200 Subject: drm/gma500/psb: Convert to gma_pipe_set_base() Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 73 +----------------------------- 1 file changed, 1 insertion(+), 72 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 0642e7d37fa8..ca041c6aba4a 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -82,77 +82,6 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock) clock->dot = clock->vco / clock->p; } -static int psb_intel_pipe_set_base(struct drm_crtc *crtc, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); - int pipe = psb_intel_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - unsigned long start, offset; - u32 dspcntr; - int ret = 0; - - if (!gma_power_begin(dev, true)) - return 0; - - /* no fb bound */ - if (!crtc->fb) { - dev_dbg(dev->dev, "No FB bound\n"); - goto psb_intel_pipe_cleaner; - } - - /* We are displaying this buffer, make sure it is actually loaded - into the GTT */ - ret = psb_gtt_pin(psbfb->gtt); - if (ret < 0) - goto psb_intel_pipe_set_base_exit; - start = psbfb->gtt->offset; - - 
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); - - REG_WRITE(map->stride, crtc->fb->pitches[0]); - - dspcntr = REG_READ(map->cntr); - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - - switch (crtc->fb->bits_per_pixel) { - case 8: - dspcntr |= DISPPLANE_8BPP; - break; - case 16: - if (crtc->fb->depth == 15) - dspcntr |= DISPPLANE_15_16BPP; - else - dspcntr |= DISPPLANE_16BPP; - break; - case 24: - case 32: - dspcntr |= DISPPLANE_32BPP_NO_ALPHA; - break; - default: - dev_err(dev->dev, "Unknown color depth\n"); - ret = -EINVAL; - psb_gtt_unpin(psbfb->gtt); - goto psb_intel_pipe_set_base_exit; - } - REG_WRITE(map->cntr, dspcntr); - - REG_WRITE(map->base, start + offset); - REG_READ(map->base); - -psb_intel_pipe_cleaner: - /* If there was a previous display we can now unpin it */ - if (old_fb) - psb_gtt_unpin(to_psb_fb(old_fb)->gtt); - -psb_intel_pipe_set_base_exit: - gma_power_end(dev); - return ret; -} - /** * Sets the power management mode of the pipe and plane. * @@ -990,7 +919,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, .mode_set = psb_intel_crtc_mode_set, - .mode_set_base = psb_intel_pipe_set_base, + .mode_set_base = gma_pipe_set_base, .prepare = gma_crtc_prepare, .commit = gma_crtc_commit, .disable = gma_crtc_disable, -- cgit v1.2.3 From 6443ea1aca56f011432b6ea66ec4cc21a813bb0d Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:39:58 +0200 Subject: drm/gma500: Convert to generic gamma funcs This takes care of the remaining chips using the old generic code. We don't check if the pipe number is valid but the old code peeked in the register map before checking anyways so just ignore it. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/mdfld_intel_display.c | 2 +- drivers/gpu/drm/gma500/oaktrail_crtc.c | 2 +- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2 +- drivers/gpu/drm/gma500/psb_drv.c | 2 +- drivers/gpu/drm/gma500/psb_intel_display.c | 70 +--------------------------- drivers/gpu/drm/gma500/psb_intel_drv.h | 1 - 6 files changed, 6 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index da83fddbc9a8..2862cdb8920a 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -436,7 +436,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode) } } - psb_intel_crtc_load_lut(crtc); + gma_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable if it's on this pipe */ diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 8af461f3a72d..5cd007e82e59 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(map->base, REG_READ(map->base)); } - psb_intel_crtc_load_lut(crtc); + gma_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable if it's on this pipe */ diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index d9013f70b019..7d9a5ee52814 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -464,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode) REG_READ(DSPBSURF); } - psb_intel_crtc_load_lut(crtc); + gma_crtc_load_lut(crtc); } /* DSPARB */ diff --git a/drivers/gpu/drm/gma500/psb_drv.c 
b/drivers/gpu/drm/gma500/psb_drv.c index bddea5807442..b4d13261f762 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -459,7 +459,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data, for (i = 0; i < 256; i++) psb_intel_crtc->lut_adj[i] = lut_arg->lut[i]; - psb_intel_crtc_load_lut(crtc); + gma_crtc_load_lut(crtc); return 0; } diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index ca041c6aba4a..940c6f2fb8ee 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -135,7 +135,7 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(map->base, REG_READ(map->base)); } - psb_intel_crtc_load_lut(crtc); + gma_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable * if it's on this pipe */ @@ -431,54 +431,6 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, return 0; } -/** Loads the palette/gamma unit for the CRTC with the prepared values */ -void psb_intel_crtc_load_lut(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; - int palreg = map->palette; - int i; - - /* The clocks have to be on to load the palette. */ - if (!crtc->enabled) - return; - - switch (psb_intel_crtc->pipe) { - case 0: - case 1: - break; - default: - dev_err(dev->dev, "Illegal Pipe Number.\n"); - return; - } - - if (gma_power_begin(dev, false)) { - for (i = 0; i < 256; i++) { - REG_WRITE(palreg + 4 * i, - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i])); - } - gma_power_end(dev); - } else { - for (i = 0; i < 256; i++) { - dev_priv->regs.pipe[0].palette[i] = - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i]); - } - - } -} - /** * Save HW states of giving crtc */ @@ -737,24 +689,6 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, - u16 *green, u16 *blue, uint32_t type, uint32_t size) -{ - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int i; - - if (size != 256) - return; - - for (i = 0; i < 256; i++) { - psb_intel_crtc->lut_r[i] = red[i] >> 8; - psb_intel_crtc->lut_g[i] = green[i] >> 8; - psb_intel_crtc->lut_b[i] = blue[i] >> 8; - } - - psb_intel_crtc_load_lut(crtc); -} - static int psb_crtc_set_config(struct drm_mode_set *set) { int ret; @@ -930,7 +864,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = { .restore = psb_intel_crtc_restore, .cursor_set = psb_intel_crtc_cursor_set, .cursor_move = psb_intel_crtc_cursor_move, - .gamma_set = psb_intel_crtc_gamma_set, + .gamma_set = gma_crtc_gamma_set, .set_config = psb_crtc_set_config, .destroy = psb_intel_crtc_destroy, }; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 596850210a51..c8cd9bee7a89 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -226,7 +226,6 @@ extern void oaktrail_dsi_init(struct drm_device *dev, extern void mid_dsi_init(struct drm_device 
*dev, struct psb_intel_mode_device *mode_dev, int dsi_num); -extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); extern void psb_intel_encoder_commit(struct drm_encoder *encoder); extern void psb_intel_encoder_destroy(struct drm_encoder *encoder); -- cgit v1.2.3 From 42568dd5d3b5bff18d9dbc6f2f2814ed28753ada Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 18:44:25 +0200 Subject: drm/gma500/psb: Convert to gma_crtc_dpms() Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 104 +---------------------------- 1 file changed, 1 insertion(+), 103 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 940c6f2fb8ee..b52bde2a0fe9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -82,108 +82,6 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock) clock->dot = clock->vco / clock->p; } -/** - * Sets the power management mode of the pipe and plane. - * - * This code should probably grow support for turning the cursor off and back - * on appropriately at the same time as we're turning the pipe off/on. - */ -static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - u32 temp; - - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. - */ - switch (mode) { - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - /* Enable the DPLL */ - temp = REG_READ(map->dpll); - if ((temp & DPLL_VCO_ENABLE) == 0) { - REG_WRITE(map->dpll, temp); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. */ - udelay(150); - REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. */ - udelay(150); - REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); - REG_READ(map->dpll); - /* Wait for the clocks to stabilize. 
*/ - udelay(150); - } - - /* Enable the pipe */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) == 0) - REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); - - /* Enable the plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - REG_WRITE(map->cntr, - temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - } - - gma_crtc_load_lut(crtc); - - /* Give the overlay scaler a chance to enable - * if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, true); TODO */ - break; - case DRM_MODE_DPMS_OFF: - /* Give the overlay scaler a chance to disable - * if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Disable display plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(map->cntr, - temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - REG_READ(map->base); - } - - /* Next, disable display pipes */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); - REG_READ(map->conf); - } - - /* Wait for vblank for the disable to take effect. */ - gma_wait_for_vblank(dev); - - temp = REG_READ(map->dpll); - if ((temp & DPLL_VCO_ENABLE) != 0) { - REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE); - REG_READ(map->dpll); - } - - /* Wait for the clocks to turn off. */ - udelay(150); - break; - } - - /*Set FIFO Watermarks*/ - REG_WRITE(DSPARB, 0x3F3E); -} - void psb_intel_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = @@ -850,7 +748,7 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc) } const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { - .dpms = psb_intel_crtc_dpms, + .dpms = gma_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, .mode_set = psb_intel_crtc_mode_set, .mode_set_base = gma_pipe_set_base, -- cgit v1.2.3 From fe5802957f2856b20a408b8933472a27d00e5f77 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 21:52:19 +0200 Subject: drm/gma500/oak: Use identical generic crtc funcs Use the generic gma functions instead of the oaktrail functions where they are identical. 
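The shared gma_crtc_prepare()/gma_crtc_commit() helpers are not shown in this patch; since only identical functions are being replaced, they are presumably the same trivial DPMS wrappers that the removed per-chip copies were, e.g. (a sketch, assuming gma_crtc_prepare() mirrors the removed oaktrail/psb versions):

	static void gma_crtc_prepare(struct drm_crtc *crtc)
	{
		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	}

with gma_crtc_commit() doing the same for DRM_MODE_DPMS_ON.
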
Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/oaktrail_crtc.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 5cd007e82e59..504ae1117782 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -494,13 +494,6 @@ oaktrail_crtc_mode_set_exit: return 0; } -static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static int oaktrail_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -563,24 +556,12 @@ pipe_set_base_exit: return ret; } -static void oaktrail_crtc_prepare(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void oaktrail_crtc_commit(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -} - const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { .dpms = oaktrail_crtc_dpms, - .mode_fixup = oaktrail_crtc_mode_fixup, + .mode_fixup = gma_crtc_mode_fixup, .mode_set = oaktrail_crtc_mode_set, .mode_set_base = oaktrail_pipe_set_base, - .prepare = oaktrail_crtc_prepare, - .commit = oaktrail_crtc_commit, + .prepare = gma_crtc_prepare, + .commit = gma_crtc_commit, }; -- cgit v1.2.3 From d903b610d3a319933caf6ca52c76933b11434ef6 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 22:04:20 +0200 Subject: drm/gma500/mdfld: Use identical generic crtc funcs Use the generic gma functions instead of the medfield functions where they are identical. 
Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/mdfld_intel_display.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 2862cdb8920a..0114408c483f 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) } } -static void psb_intel_crtc_prepare(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void psb_intel_crtc_commit(struct drm_crtc *crtc) -{ - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -} - -static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit: const struct drm_crtc_helper_funcs mdfld_helper_funcs = { .dpms = mdfld_crtc_dpms, - .mode_fixup = psb_intel_crtc_mode_fixup, + .mode_fixup = gma_crtc_mode_fixup, .mode_set = mdfld_crtc_mode_set, .mode_set_base = mdfld__intel_pipe_set_base, - .prepare = psb_intel_crtc_prepare, - .commit = psb_intel_crtc_commit, + .prepare = gma_crtc_prepare, + .commit = gma_crtc_commit, }; -- cgit v1.2.3 From b1255b884914920f4086448ec4930e814e97afde Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 23:24:22 +0200 Subject: drm/gma500/psb: Convert to generic crtc->destroy Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index b52bde2a0fe9..f4d308f918e3 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -726,27 +726,6 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, return mode; } -static void psb_intel_crtc_destroy(struct drm_crtc *crtc) -{ - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct gtt_range *gt; - - /* Unpin the old GEM object */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = NULL; - } - - if (psb_intel_crtc->cursor_gt != NULL) - psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt); - kfree(psb_intel_crtc->crtc_state); - drm_crtc_cleanup(crtc); - kfree(psb_intel_crtc); -} - const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = gma_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, @@ -764,7 +743,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = { .cursor_move = psb_intel_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, .set_config = psb_crtc_set_config, - .destroy = psb_intel_crtc_destroy, + .destroy = gma_crtc_destroy, }; const struct gma_clock_funcs psb_clock_funcs = { -- cgit v1.2.3 From 38945be630a5848ffc75f2f9027cbb211dec3982 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 23:43:01 +0200 Subject: drm/gma500: Add generic cursor functions Signed-off-by: Patrik Jakobsson --- 
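Note: these helpers only become reachable once the per-chip drm_crtc_funcs tables point at them; the two follow-up patches in this series do exactly that, roughly:

	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
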
drivers/gpu/drm/gma500/gma_display.c | 151 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/gma500/gma_display.h | 5 ++ 2 files changed, 156 insertions(+) diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 7a0888a64a33..40894c206098 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -327,6 +327,157 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(DSPARB, 0x3F3E); } +int gma_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; + uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; + uint32_t temp; + size_t addr = 0; + struct gtt_range *gt; + struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; + struct drm_gem_object *obj; + void *tmp_dst, *tmp_src; + int ret = 0, i, cursor_pages; + + /* If we didn't get a handle then turn the cursor off */ + if (!handle) { + temp = CURSOR_MODE_DISABLE; + + if (gma_power_begin(dev, false)) { + REG_WRITE(control, temp); + REG_WRITE(base, 0); + gma_power_end(dev); + } + + /* Unpin the old GEM object */ + if (psb_intel_crtc->cursor_obj) { + gt = container_of(psb_intel_crtc->cursor_obj, + struct gtt_range, gem); + psb_gtt_unpin(gt); + drm_gem_object_unreference(psb_intel_crtc->cursor_obj); + psb_intel_crtc->cursor_obj = NULL; + } + + return 0; + } + + /* Currently we only support 64x64 cursors */ + if (width != 64 || height != 64) { + dev_dbg(dev->dev, "We currently only support 64x64 cursors\n"); + return -EINVAL; + } + + obj = drm_gem_object_lookup(dev, file_priv, handle); + if (!obj) + return -ENOENT; + + if (obj->size < width * height * 4) { + dev_dbg(dev->dev, "Buffer is too small\n"); + ret = -ENOMEM; + goto unref_cursor; + } + + gt = container_of(obj, struct gtt_range, gem); + + /* Pin the memory into the GTT */ + ret = psb_gtt_pin(gt); + if (ret) { + dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); + goto unref_cursor; + } + + if (dev_priv->ops->cursor_needs_phys) { + if (cursor_gt == NULL) { + dev_err(dev->dev, "No hardware cursor mem available"); + ret = -ENOMEM; + goto unref_cursor; + } + + /* Prevent overflow */ + if (gt->npage > 4) + cursor_pages = 4; + else + cursor_pages = gt->npage; + + /* Copy the cursor to cursor mem */ + tmp_dst = dev_priv->vram_addr + cursor_gt->offset; + for (i = 0; i < cursor_pages; i++) { + tmp_src = kmap(gt->pages[i]); + memcpy(tmp_dst, tmp_src, PAGE_SIZE); + kunmap(gt->pages[i]); + tmp_dst += PAGE_SIZE; + } + + addr = psb_intel_crtc->cursor_addr; + } else { + addr = gt->offset; + psb_intel_crtc->cursor_addr = addr; + } + + temp = 0; + /* set the pipe for the cursor */ + temp |= (pipe << 28); + temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + + if (gma_power_begin(dev, false)) { + REG_WRITE(control, temp); + REG_WRITE(base, addr); + gma_power_end(dev); + } + + /* unpin the old bo */ + if (psb_intel_crtc->cursor_obj) { + gt = container_of(psb_intel_crtc->cursor_obj, + struct gtt_range, gem); + psb_gtt_unpin(gt); + drm_gem_object_unreference(psb_intel_crtc->cursor_obj); + } + + psb_intel_crtc->cursor_obj = obj; + return ret; + +unref_cursor: + drm_gem_object_unreference(obj); + return ret; +} + +int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct 
drm_device *dev = crtc->dev; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + uint32_t temp = 0; + uint32_t addr; + + if (x < 0) { + temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); + x = -x; + } + if (y < 0) { + temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); + y = -y; + } + + temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); + temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); + + addr = psb_intel_crtc->cursor_addr; + + if (gma_power_begin(dev, false)) { + REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); + REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr); + gma_power_end(dev); + } + return 0; +} + bool gma_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 665164d41224..0d3b6074ca99 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -64,6 +64,11 @@ extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type); extern void gma_wait_for_vblank(struct drm_device *dev); extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); +extern int gma_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, uint32_t height); +extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); extern void gma_crtc_load_lut(struct drm_crtc *crtc); extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, u32 start, u32 size); -- cgit v1.2.3 From 04416625f9264d6a2322cb919fa4b5b2bf72b94f Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 23:46:11 +0200 Subject: drm/gma500/cdv: Convert to generic cursor funcs Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 130 +---------------------------- 1 file changed, 2 insertions(+), 128 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index e30761a9bf83..2351f4223b83 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -1035,132 +1035,6 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc) REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); } -static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, uint32_t height) -{ - struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; - uint32_t base = (pipe == 0) ? 
CURABASE : CURBBASE; - uint32_t temp; - size_t addr = 0; - struct gtt_range *gt; - struct drm_gem_object *obj; - int ret = 0; - - /* if we want to turn of the cursor ignore width and height */ - if (!handle) { - /* turn off the cursor */ - temp = CURSOR_MODE_DISABLE; - - if (gma_power_begin(dev, false)) { - REG_WRITE(control, temp); - REG_WRITE(base, 0); - gma_power_end(dev); - } - - /* unpin the old GEM object */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = NULL; - } - - return 0; - } - - /* Currently we only support 64x64 cursors */ - if (width != 64 || height != 64) { - dev_dbg(dev->dev, "we currently only support 64x64 cursors\n"); - return -EINVAL; - } - - obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; - - if (obj->size < width * height * 4) { - dev_dbg(dev->dev, "buffer is to small\n"); - ret = -ENOMEM; - goto unref_cursor; - } - - gt = container_of(obj, struct gtt_range, gem); - - /* Pin the memory into the GTT */ - ret = psb_gtt_pin(gt); - if (ret) { - dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - goto unref_cursor; - } - - addr = gt->offset; /* Or resource.start ??? */ - - psb_intel_crtc->cursor_addr = addr; - - temp = 0; - /* set the pipe for the cursor */ - temp |= (pipe << 28); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - - if (gma_power_begin(dev, false)) { - REG_WRITE(control, temp); - REG_WRITE(base, addr); - gma_power_end(dev); - } - - /* unpin the old GEM object */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - } - - psb_intel_crtc->cursor_obj = obj; - return ret; - -unref_cursor: - drm_gem_object_unreference(obj); - return ret; -} - -static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - uint32_t temp = 0; - uint32_t adder; - - - if (x < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); - x = -x; - } - if (y < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); - y = -y; - } - - temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); - temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); - - adder = psb_intel_crtc->cursor_addr; - - if (gma_power_begin(dev, false)) { - REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); - REG_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, adder); - gma_power_end(dev); - } - return 0; -} - static int cdv_crtc_set_config(struct drm_mode_set *set) { int ret = 0; @@ -1331,8 +1205,8 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { const struct drm_crtc_funcs cdv_intel_crtc_funcs = { .save = cdv_intel_crtc_save, .restore = cdv_intel_crtc_restore, - .cursor_set = cdv_intel_crtc_cursor_set, - .cursor_move = cdv_intel_crtc_cursor_move, + .cursor_set = gma_crtc_cursor_set, + .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, .set_config = cdv_crtc_set_config, .destroy = gma_crtc_destroy, -- cgit v1.2.3 From 561573bf69f71c67e6d807efef91c7cf11637817 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 10 Jul 2013 23:48:13 +0200 Subject: drm/gma500/psb: Convert to generic cursor funcs Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 157 +---------------------------- 1 file changed, 2 insertions(+), 155 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index f4d308f918e3..1e6a357a245e 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -434,159 +434,6 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc) REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); } -static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, uint32_t height) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; - uint32_t base = (pipe == 0) ? 
CURABASE : CURBBASE; - uint32_t temp; - size_t addr = 0; - struct gtt_range *gt; - struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; - struct drm_gem_object *obj; - void *tmp_dst, *tmp_src; - int ret = 0, i, cursor_pages; - - /* if we want to turn of the cursor ignore width and height */ - if (!handle) { - /* turn off the cursor */ - temp = CURSOR_MODE_DISABLE; - - if (gma_power_begin(dev, false)) { - REG_WRITE(control, temp); - REG_WRITE(base, 0); - gma_power_end(dev); - } - - /* Unpin the old GEM object */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = NULL; - } - - return 0; - } - - /* Currently we only support 64x64 cursors */ - if (width != 64 || height != 64) { - dev_dbg(dev->dev, "we currently only support 64x64 cursors\n"); - return -EINVAL; - } - - obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; - - if (obj->size < width * height * 4) { - dev_dbg(dev->dev, "buffer is to small\n"); - ret = -ENOMEM; - goto unref_cursor; - } - - gt = container_of(obj, struct gtt_range, gem); - - /* Pin the memory into the GTT */ - ret = psb_gtt_pin(gt); - if (ret) { - dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - goto unref_cursor; - } - - if (dev_priv->ops->cursor_needs_phys) { - if (cursor_gt == NULL) { - dev_err(dev->dev, "No hardware cursor mem available"); - ret = -ENOMEM; - goto unref_cursor; - } - - /* Prevent overflow */ - if (gt->npage > 4) - cursor_pages = 4; - else - cursor_pages = gt->npage; - - /* Copy the cursor to cursor mem */ - tmp_dst = dev_priv->vram_addr + cursor_gt->offset; - for (i = 0; i < cursor_pages; i++) { - tmp_src = kmap(gt->pages[i]); - memcpy(tmp_dst, tmp_src, PAGE_SIZE); - kunmap(gt->pages[i]); - tmp_dst += PAGE_SIZE; - } - - addr = psb_intel_crtc->cursor_addr; - } else { - addr = gt->offset; /* Or resource.start ??? */ - psb_intel_crtc->cursor_addr = addr; - } - - temp = 0; - /* set the pipe for the cursor */ - temp |= (pipe << 28); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - - if (gma_power_begin(dev, false)) { - REG_WRITE(control, temp); - REG_WRITE(base, addr); - gma_power_end(dev); - } - - /* unpin the old bo */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - } - - psb_intel_crtc->cursor_obj = obj; - return ret; - -unref_cursor: - drm_gem_object_unreference(obj); - return ret; -} - -static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; - uint32_t temp = 0; - uint32_t addr; - - - if (x < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); - x = -x; - } - if (y < 0) { - temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); - y = -y; - } - - temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); - temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); - - addr = psb_intel_crtc->cursor_addr; - - if (gma_power_begin(dev, false)) { - REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); - REG_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, addr); - gma_power_end(dev); - } - return 0; -} - static int psb_crtc_set_config(struct drm_mode_set *set) { int ret; @@ -739,8 +586,8 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { const struct drm_crtc_funcs psb_intel_crtc_funcs = { .save = psb_intel_crtc_save, .restore = psb_intel_crtc_restore, - .cursor_set = psb_intel_crtc_cursor_set, - .cursor_move = psb_intel_crtc_cursor_move, + .cursor_set = gma_crtc_cursor_set, + .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, .set_config = psb_crtc_set_config, .destroy = gma_crtc_destroy, -- cgit v1.2.3 From 593458470191e9226c2530c0e10f8e35604063dc Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Thu, 11 Jul 2013 00:54:45 +0200 Subject: drm/gma500: Add generic encoder functions Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/gma_display.c | 41 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/gma500/gma_display.h | 4 ++++ drivers/gpu/drm/gma500/psb_intel_drv.h | 10 +++++++++ 3 files changed, 55 insertions(+) diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 40894c206098..98a0eec38cbe 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -519,6 +519,47 @@ void gma_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } +void gma_encoder_prepare(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *encoder_funcs = + encoder->helper_private; + /* lvds has its own version of prepare see psb_intel_lvds_prepare */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); +} + +void gma_encoder_commit(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *encoder_funcs = + encoder->helper_private; + /* lvds has its own version of commit see psb_intel_lvds_commit */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); +} + +void gma_encoder_destroy(struct drm_encoder *encoder) +{ + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_encoder); +} + +/* Currently there is only a 1:1 mapping of encoders and connectors */ +struct drm_encoder *gma_best_encoder(struct drm_connector *connector) +{ + struct psb_intel_encoder *psb_intel_encoder = + psb_intel_attached_encoder(connector); + + return &psb_intel_encoder->base; +} + +void gma_connector_attach_encoder(struct psb_intel_connector *connector, + struct psb_intel_encoder *encoder) +{ + connector->encoder = encoder; + drm_mode_connector_attach_encoder(&connector->base, + &encoder->base); +} + #define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; } bool gma_pll_is_valid(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 0d3b6074ca99..1e7016529d75 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -81,6 +81,10 @@ extern void gma_crtc_commit(struct drm_crtc *crtc); extern void gma_crtc_disable(struct drm_crtc *crtc); extern void gma_crtc_destroy(struct drm_crtc *crtc); +extern void gma_encoder_prepare(struct drm_encoder *encoder); +extern void gma_encoder_commit(struct drm_encoder *encoder); +extern void gma_encoder_destroy(struct drm_encoder *encoder); + /* Common clock related functions */ extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk); extern void gma_clock(int refclk, struct gma_clock_t *clock); diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 
c8cd9bee7a89..39f09e067beb 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -230,12 +230,22 @@ extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); extern void psb_intel_encoder_commit(struct drm_encoder *encoder); extern void psb_intel_encoder_destroy(struct drm_encoder *encoder); +extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector); +extern void gma_connector_attach_encoder(struct psb_intel_connector *connector, + struct psb_intel_encoder *encoder); + static inline struct psb_intel_encoder *psb_intel_attached_encoder( struct drm_connector *connector) { return to_psb_intel_connector(connector)->encoder; } +static inline struct psb_intel_encoder *gma_attached_encoder( + struct drm_connector *connector) +{ + return to_psb_intel_connector(connector)->encoder; +} + extern void psb_intel_connector_attach_encoder( struct psb_intel_connector *connector, struct psb_intel_encoder *encoder); -- cgit v1.2.3 From c9d4959000c0b11c4265af820434b868c4066e0e Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Thu, 11 Jul 2013 01:02:01 +0200 Subject: drm/gma500: Convert to generic encoder funcs Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_crt.c | 13 ++++---- drivers/gpu/drm/gma500/cdv_intel_display.c | 4 +-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 17 +++++----- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 19 ++++++----- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 9 +++--- drivers/gpu/drm/gma500/framebuffer.c | 2 +- drivers/gpu/drm/gma500/gma_display.c | 4 +-- drivers/gpu/drm/gma500/mdfld_intel_display.c | 2 +- drivers/gpu/drm/gma500/oaktrail_crtc.c | 2 +- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 9 +++--- drivers/gpu/drm/gma500/oaktrail_lvds.c | 3 +- drivers/gpu/drm/gma500/psb_drv.c | 2 +- drivers/gpu/drm/gma500/psb_intel_display.c | 47 ++-------------------------- drivers/gpu/drm/gma500/psb_intel_drv.h | 17 ---------- drivers/gpu/drm/gma500/psb_intel_lvds.c | 15 +++++---- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 17 +++++----- 16 files changed, 58 insertions(+), 124 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 7b8386fc3024..0cfcb26fb2a1 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -198,7 +198,7 @@ static enum drm_connector_status cdv_intel_crt_detect( static void cdv_intel_crt_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); drm_sysfs_connector_remove(connector); @@ -209,7 +209,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector) static int cdv_intel_crt_get_modes(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); } @@ -227,8 +227,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector, static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = { .dpms = cdv_intel_crt_dpms, .mode_fixup = cdv_intel_crt_mode_fixup, - .prepare = psb_intel_encoder_prepare, - .commit = psb_intel_encoder_commit, + .prepare = gma_encoder_prepare, + .commit = gma_encoder_commit, .mode_set = cdv_intel_crt_mode_set, }; @@ -244,7 +244,7 @@ static const struct drm_connector_helper_funcs 
cdv_intel_crt_connector_helper_funcs = { .mode_valid = cdv_intel_crt_mode_valid, .get_modes = cdv_intel_crt_get_modes, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) @@ -284,8 +284,7 @@ void cdv_intel_crt_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); /* Set up the DDC bus. */ i2c_reg = GPIOA; diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 2351f4223b83..257e0e8820ec 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -499,7 +499,7 @@ static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) @@ -634,7 +634,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 839ab83ff6ec..e3907becf8c9 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -315,7 +315,7 @@ static int cdv_intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct psb_intel_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); int max_lanes = cdv_intel_dp_max_lane_count(encoder); @@ -1532,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder) static enum drm_connector_status cdv_intel_dp_detect(struct drm_connector *connector, bool force) { - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct psb_intel_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; enum drm_connector_status status; struct edid *edid = NULL; @@ -1566,7 +1566,8 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force) static int cdv_intel_dp_get_modes(struct drm_connector *connector) { - struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); + struct psb_intel_encoder *intel_encoder = + gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; struct edid *edid = NULL; int ret = 0; @@ -1622,7 +1623,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector) static bool cdv_intel_dp_detect_audio(struct drm_connector *connector) { - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct psb_intel_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; struct edid *edid; bool has_audio = false; @@ -1648,7 +1649,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector, uint64_t val) { struct 
drm_psb_private *dev_priv = connector->dev->dev_private; - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct psb_intel_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; @@ -1702,7 +1703,7 @@ static void cdv_intel_dp_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv; if (is_edp(psb_intel_encoder)) { @@ -1742,7 +1743,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { .get_modes = cdv_intel_dp_get_modes, .mode_valid = cdv_intel_dp_mode_valid, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { @@ -1828,7 +1829,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); if (type == DRM_MODE_CONNECTOR_DisplayPort) psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 464153d9d2df..a849e912f30a 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -117,7 +117,7 @@ static void cdv_hdmi_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); @@ -127,7 +127,7 @@ static void cdv_hdmi_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); @@ -138,7 +138,7 @@ static enum drm_connector_status cdv_hdmi_detect( struct drm_connector *connector, bool force) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; @@ -222,7 +222,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector, static int cdv_hdmi_get_modes(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct edid *edid = NULL; int ret = 0; @@ -257,7 +257,7 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector, static void cdv_hdmi_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (psb_intel_encoder->i2c_bus) psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); @@ -269,16 +269,16 @@ static void 
cdv_hdmi_destroy(struct drm_connector *connector) static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { .dpms = cdv_hdmi_dpms, .mode_fixup = cdv_hdmi_mode_fixup, - .prepare = psb_intel_encoder_prepare, + .prepare = gma_encoder_prepare, .mode_set = cdv_hdmi_mode_set, - .commit = psb_intel_encoder_commit, + .commit = gma_encoder_commit, }; static const struct drm_connector_helper_funcs cdv_hdmi_connector_helper_funcs = { .get_modes = cdv_hdmi_get_modes, .mode_valid = cdv_hdmi_mode_valid, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { @@ -328,8 +328,7 @@ void cdv_hdmi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_TMDS); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; hdmi_priv->hdmi_reg = reg; hdmi_priv->has_hdmi_sink = false; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index d81dbc3368f0..e461dce1d088 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -408,7 +408,7 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; int ret; @@ -445,7 +445,7 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) static void cdv_intel_lvds_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (psb_intel_encoder->i2c_bus) psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); @@ -529,7 +529,7 @@ static const struct drm_connector_helper_funcs cdv_intel_lvds_connector_helper_funcs = { .get_modes = cdv_intel_lvds_get_modes, .mode_valid = cdv_intel_lvds_mode_valid, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { @@ -659,8 +659,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, DRM_MODE_ENCODER_LVDS); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 362dd2ad286f..e0e7eb0680e6 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -704,7 +704,7 @@ static void psb_setup_outputs(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct drm_encoder *encoder = &psb_intel_encoder->base; int crtc_mask = 0, clone_mask = 0; diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 98a0eec38cbe..67d86d8fcd4d 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -38,7 +38,7 @@ bool 
gma_pipe_has_type(struct drm_crtc *crtc, int type) list_for_each_entry(l_entry, &mode_config->connector_list, head) { if (l_entry->encoder && l_entry->encoder->crtc == crtc) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(l_entry); + gma_attached_encoder(l_entry); if (psb_intel_encoder->type == type) return true; } @@ -547,7 +547,7 @@ void gma_encoder_destroy(struct drm_encoder *encoder) struct drm_encoder *gma_best_encoder(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); return &psb_intel_encoder->base; } diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 0114408c483f..e5d3a02ac650 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -747,7 +747,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; - psb_intel_encoder = psb_intel_attached_encoder(connector); + psb_intel_encoder = gma_attached_encoder(connector); switch (psb_intel_encoder->type) { case INTEL_OUTPUT_MIPI: diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 504ae1117782..b2744293c5c8 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -324,7 +324,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, if (!connector->encoder || connector->encoder->crtc != crtc) continue; - psb_intel_encoder = psb_intel_attached_encoder(connector); + psb_intel_encoder = gma_attached_encoder(connector); switch (psb_intel_encoder->type) { case INTEL_OUTPUT_LVDS: diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 7d9a5ee52814..059de197c942 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -609,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector) static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { .dpms = oaktrail_hdmi_dpms, .mode_fixup = oaktrail_hdmi_mode_fixup, - .prepare = psb_intel_encoder_prepare, + .prepare = gma_encoder_prepare, .mode_set = oaktrail_hdmi_mode_set, - .commit = psb_intel_encoder_commit, + .commit = gma_encoder_commit, }; static const struct drm_connector_helper_funcs oaktrail_hdmi_connector_helper_funcs = { .get_modes = oaktrail_hdmi_get_modes, .mode_valid = oaktrail_hdmi_mode_valid, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { @@ -663,8 +663,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, &oaktrail_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 325013a9c48c..7b1cfd6d1606 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -352,8 +352,7 @@ void oaktrail_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + 
gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index b4d13261f762..ab8b9ef86c37 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -372,7 +372,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) /* Only add backlight support if we have LVDS output */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - psb_intel_encoder = psb_intel_attached_encoder(connector); + psb_intel_encoder = gma_attached_encoder(connector); switch (psb_intel_encoder->type) { case INTEL_OUTPUT_LVDS: diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 1e6a357a245e..a419e7969cc8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -82,30 +82,6 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock) clock->dot = clock->vco / clock->p; } -void psb_intel_encoder_prepare(struct drm_encoder *encoder) -{ - struct drm_encoder_helper_funcs *encoder_funcs = - encoder->helper_private; - /* lvds has its own version of prepare see psb_intel_lvds_prepare */ - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); -} - -void psb_intel_encoder_commit(struct drm_encoder *encoder) -{ - struct drm_encoder_helper_funcs *encoder_funcs = - encoder->helper_private; - /* lvds has its own version of commit see psb_intel_lvds_commit */ - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); -} - -void psb_intel_encoder_destroy(struct drm_encoder *encoder) -{ - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -152,7 +128,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) @@ -752,29 +728,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); if (type_mask & (1 << psb_intel_encoder->type)) index_mask |= (1 << entry); entry++; } return index_mask; } - -/* current intel driver doesn't take advantage of encoders - always give back the encoder for the connector -*/ -struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector) -{ - struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); - - return &psb_intel_encoder->base; -} - -void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector, - struct psb_intel_encoder *encoder) -{ - connector->encoder = encoder; - drm_mode_connector_attach_encoder(&connector->base, - &encoder->base); -} diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 39f09e067beb..1e1bf8bbfe5a 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -226,33 +226,16 @@ extern void oaktrail_dsi_init(struct 
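
For reference, the shared helpers that replace the removed psb_intel_* connector/encoder glue reduce to the pattern below. This is a sketch pieced together from the hunks above: gma_best_encoder() appears verbatim in gma_display.c, while the body of gma_connector_attach_encoder() is assumed to match the removed psb_intel_connector_attach_encoder() apart from the rename.

/* Sketch only, not a new patch: the gma_* connector/encoder glue in gma_display.c. */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
        /* The driver uses exactly one encoder per connector, stored at attach time. */
        return &gma_attached_encoder(connector)->base;
}

void gma_connector_attach_encoder(struct psb_intel_connector *connector,
                                  struct psb_intel_encoder *encoder)
{
        connector->encoder = encoder;
        drm_mode_connector_attach_encoder(&connector->base, &encoder->base);
}
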
drm_device *dev, extern void mid_dsi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int dsi_num); -extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); -extern void psb_intel_encoder_commit(struct drm_encoder *encoder); -extern void psb_intel_encoder_destroy(struct drm_encoder *encoder); - extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector); extern void gma_connector_attach_encoder(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder); -static inline struct psb_intel_encoder *psb_intel_attached_encoder( - struct drm_connector *connector) -{ - return to_psb_intel_connector(connector)->encoder; -} - static inline struct psb_intel_encoder *gma_attached_encoder( struct drm_connector *connector) { return to_psb_intel_connector(connector)->encoder; } -extern void psb_intel_connector_attach_encoder( - struct psb_intel_connector *connector, - struct psb_intel_encoder *encoder); - -extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector - *connector); - extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 9fa5fa2e6192..42541e8ea0ad 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -268,7 +268,7 @@ static void psb_intel_lvds_save(struct drm_connector *connector) struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; @@ -308,7 +308,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) struct drm_device *dev = connector->dev; u32 pp_status; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; @@ -350,7 +350,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector, { struct drm_psb_private *dev_priv = connector->dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct drm_display_mode *fixed_mode = dev_priv->mode_dev.panel_fixed_mode; @@ -526,7 +526,7 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; int ret = 0; @@ -565,7 +565,7 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) void psb_intel_lvds_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; if (lvds_priv->ddc_bus) @@ -656,7 +656,7 @@ const struct drm_connector_helper_funcs psb_intel_lvds_connector_helper_funcs = { .get_modes = psb_intel_lvds_get_modes, .mode_valid = psb_intel_lvds_mode_valid, - .best_encoder = 
psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { @@ -734,8 +734,7 @@ void psb_intel_lvds_init(struct drm_device *dev, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - psb_intel_connector_attach_encoder(psb_intel_connector, - psb_intel_encoder); + gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index e3d1078ecf09..631e01abc04b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -200,7 +200,7 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder) static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { - return container_of(psb_intel_attached_encoder(connector), + return container_of(gma_attached_encoder(connector), struct psb_intel_sdvo, base); } @@ -1837,7 +1837,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_encoder *psb_intel_encoder = - psb_intel_attached_encoder(connector); + gma_attached_encoder(connector); struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&psb_intel_encoder->base); @@ -1847,8 +1847,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector) static void psb_intel_sdvo_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = - &psb_intel_attached_encoder(connector)->base; + struct drm_encoder *encoder = &gma_attached_encoder(connector)->base; struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder); struct drm_crtc *crtc = encoder->crtc; @@ -1864,9 +1863,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector) static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { .dpms = psb_intel_sdvo_dpms, .mode_fixup = psb_intel_sdvo_mode_fixup, - .prepare = psb_intel_encoder_prepare, + .prepare = gma_encoder_prepare, .mode_set = psb_intel_sdvo_mode_set, - .commit = psb_intel_encoder_commit, + .commit = gma_encoder_commit, }; static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { @@ -1882,7 +1881,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { .get_modes = psb_intel_sdvo_get_modes, .mode_valid = psb_intel_sdvo_mode_valid, - .best_encoder = psb_intel_best_encoder, + .best_encoder = gma_best_encoder, }; static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) @@ -1894,7 +1893,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) psb_intel_sdvo->sdvo_lvds_fixed_mode); i2c_del_adapter(&psb_intel_sdvo->ddc); - psb_intel_encoder_destroy(encoder); + gma_encoder_destroy(encoder); } static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { @@ -2055,7 +2054,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector, connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; - psb_intel_connector_attach_encoder(&connector->base, &encoder->base); + gma_connector_attach_encoder(&connector->base, &encoder->base); drm_sysfs_connector_add(&connector->base.base); } -- cgit v1.2.3 From 2e775700a297982a3ffbfe72935982b6fb51e015 Mon Sep 17 00:00:00 2001 From: 
Patrik Jakobsson Date: Fri, 12 Jul 2013 15:30:56 +0200 Subject: drm/gma500: Add generic crtc save/restore funcs Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/gma_display.c | 105 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/gma500/gma_display.h | 3 + 2 files changed, 108 insertions(+) diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 67d86d8fcd4d..cca40c0b64e4 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -519,6 +519,111 @@ void gma_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } +/** + * Save HW states of given crtc + */ +void gma_crtc_save(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; + const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + uint32_t palette_reg; + int i; + + if (!crtc_state) { + dev_err(dev->dev, "No CRTC state found\n"); + return; + } + + crtc_state->saveDSPCNTR = REG_READ(map->cntr); + crtc_state->savePIPECONF = REG_READ(map->conf); + crtc_state->savePIPESRC = REG_READ(map->src); + crtc_state->saveFP0 = REG_READ(map->fp0); + crtc_state->saveFP1 = REG_READ(map->fp1); + crtc_state->saveDPLL = REG_READ(map->dpll); + crtc_state->saveHTOTAL = REG_READ(map->htotal); + crtc_state->saveHBLANK = REG_READ(map->hblank); + crtc_state->saveHSYNC = REG_READ(map->hsync); + crtc_state->saveVTOTAL = REG_READ(map->vtotal); + crtc_state->saveVBLANK = REG_READ(map->vblank); + crtc_state->saveVSYNC = REG_READ(map->vsync); + crtc_state->saveDSPSTRIDE = REG_READ(map->stride); + + /* NOTE: DSPSIZE DSPPOS only for psb */ + crtc_state->saveDSPSIZE = REG_READ(map->size); + crtc_state->saveDSPPOS = REG_READ(map->pos); + + crtc_state->saveDSPBASE = REG_READ(map->base); + + palette_reg = map->palette; + for (i = 0; i < 256; ++i) + crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2)); +} + +/** + * Restore HW states of given crtc + */ +void gma_crtc_restore(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; + const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + uint32_t palette_reg; + int i; + + if (!crtc_state) { + dev_err(dev->dev, "No crtc state\n"); + return; + } + + if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { + REG_WRITE(map->dpll, + crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); + REG_READ(map->dpll); + udelay(150); + } + + REG_WRITE(map->fp0, crtc_state->saveFP0); + REG_READ(map->fp0); + + REG_WRITE(map->fp1, crtc_state->saveFP1); + REG_READ(map->fp1); + + REG_WRITE(map->dpll, crtc_state->saveDPLL); + REG_READ(map->dpll); + udelay(150); + + REG_WRITE(map->htotal, crtc_state->saveHTOTAL); + REG_WRITE(map->hblank, crtc_state->saveHBLANK); + REG_WRITE(map->hsync, crtc_state->saveHSYNC); + REG_WRITE(map->vtotal, crtc_state->saveVTOTAL); + REG_WRITE(map->vblank, crtc_state->saveVBLANK); + REG_WRITE(map->vsync, crtc_state->saveVSYNC); + REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE); + + REG_WRITE(map->size, crtc_state->saveDSPSIZE); + REG_WRITE(map->pos, crtc_state->saveDSPPOS); + + REG_WRITE(map->src, crtc_state->savePIPESRC); + REG_WRITE(map->base, crtc_state->saveDSPBASE); + REG_WRITE(map->conf, 
crtc_state->savePIPECONF); + + gma_wait_for_vblank(dev); + + REG_WRITE(map->cntr, crtc_state->saveDSPCNTR); + REG_WRITE(map->base, crtc_state->saveDSPBASE); + + gma_wait_for_vblank(dev); + + palette_reg = map->palette; + for (i = 0; i < 256; ++i) + REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]); +} + void gma_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 1e7016529d75..8847de069516 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -81,6 +81,9 @@ extern void gma_crtc_commit(struct drm_crtc *crtc); extern void gma_crtc_disable(struct drm_crtc *crtc); extern void gma_crtc_destroy(struct drm_crtc *crtc); +extern void gma_crtc_save(struct drm_crtc *crtc); +extern void gma_crtc_restore(struct drm_crtc *crtc); + extern void gma_encoder_prepare(struct drm_encoder *encoder); extern void gma_encoder_commit(struct drm_encoder *encoder); extern void gma_encoder_destroy(struct drm_encoder *encoder); -- cgit v1.2.3 From 0e5b26ab67bbc3f762444264cdc8be7db12f374c Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Fri, 12 Jul 2013 15:32:18 +0200 Subject: drm/gma500/psb: Convert to generic save/restore Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/psb_intel_display.c | 109 +---------------------------- 1 file changed, 2 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index a419e7969cc8..317c585e8540 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -305,111 +305,6 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, return 0; } -/** - * Save HW states of giving crtc - */ -static void psb_intel_crtc_save(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; - uint32_t paletteReg; - int i; - - if (!crtc_state) { - dev_err(dev->dev, "No CRTC state found\n"); - return; - } - - crtc_state->saveDSPCNTR = REG_READ(map->cntr); - crtc_state->savePIPECONF = REG_READ(map->conf); - crtc_state->savePIPESRC = REG_READ(map->src); - crtc_state->saveFP0 = REG_READ(map->fp0); - crtc_state->saveFP1 = REG_READ(map->fp1); - crtc_state->saveDPLL = REG_READ(map->dpll); - crtc_state->saveHTOTAL = REG_READ(map->htotal); - crtc_state->saveHBLANK = REG_READ(map->hblank); - crtc_state->saveHSYNC = REG_READ(map->hsync); - crtc_state->saveVTOTAL = REG_READ(map->vtotal); - crtc_state->saveVBLANK = REG_READ(map->vblank); - crtc_state->saveVSYNC = REG_READ(map->vsync); - crtc_state->saveDSPSTRIDE = REG_READ(map->stride); - - /*NOTE: DSPSIZE DSPPOS only for psb*/ - crtc_state->saveDSPSIZE = REG_READ(map->size); - crtc_state->saveDSPPOS = REG_READ(map->pos); - - crtc_state->saveDSPBASE = REG_READ(map->base); - - paletteReg = map->palette; - for (i = 0; i < 256; ++i) - crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); -} - -/** - * Restore HW states of giving crtc - */ -static void psb_intel_crtc_restore(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - 
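
The two helpers added above only take effect once a driver points its drm_crtc_funcs at them, which is exactly what the psb and cdv conversions below do. A condensed sketch of that wiring follows; the table name and the .destroy entry are illustrative, while the .save/.restore values come from the follow-up hunks.

/* Condensed wiring, as done by the psb/cdv patches that follow. */
static const struct drm_crtc_funcs example_crtc_funcs = {
        .save = gma_crtc_save,          /* generic register save added above */
        .restore = gma_crtc_restore,    /* generic restore: PLL first, planes after vblank */
        .destroy = gma_crtc_destroy,
};
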
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; - uint32_t paletteReg; - int i; - - if (!crtc_state) { - dev_err(dev->dev, "No crtc state\n"); - return; - } - - if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { - REG_WRITE(map->dpll, - crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); - REG_READ(map->dpll); - udelay(150); - } - - REG_WRITE(map->fp0, crtc_state->saveFP0); - REG_READ(map->fp0); - - REG_WRITE(map->fp1, crtc_state->saveFP1); - REG_READ(map->fp1); - - REG_WRITE(map->dpll, crtc_state->saveDPLL); - REG_READ(map->dpll); - udelay(150); - - REG_WRITE(map->htotal, crtc_state->saveHTOTAL); - REG_WRITE(map->hblank, crtc_state->saveHBLANK); - REG_WRITE(map->hsync, crtc_state->saveHSYNC); - REG_WRITE(map->vtotal, crtc_state->saveVTOTAL); - REG_WRITE(map->vblank, crtc_state->saveVBLANK); - REG_WRITE(map->vsync, crtc_state->saveVSYNC); - REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE); - - REG_WRITE(map->size, crtc_state->saveDSPSIZE); - REG_WRITE(map->pos, crtc_state->saveDSPPOS); - - REG_WRITE(map->src, crtc_state->savePIPESRC); - REG_WRITE(map->base, crtc_state->saveDSPBASE); - REG_WRITE(map->conf, crtc_state->savePIPECONF); - - gma_wait_for_vblank(dev); - - REG_WRITE(map->cntr, crtc_state->saveDSPCNTR); - REG_WRITE(map->base, crtc_state->saveDSPBASE); - - gma_wait_for_vblank(dev); - - paletteReg = map->palette; - for (i = 0; i < 256; ++i) - REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); -} - static int psb_crtc_set_config(struct drm_mode_set *set) { int ret; @@ -560,8 +455,8 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { - .save = psb_intel_crtc_save, - .restore = psb_intel_crtc_restore, + .save = gma_crtc_save, + .restore = gma_crtc_restore, .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, -- cgit v1.2.3 From f0ff07b73b9b5be1f725f333d1516d569c697104 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Fri, 12 Jul 2013 15:33:47 +0200 Subject: drm/gma500/cdv: Convert to generic save/restore Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 172 +---------------------------- 1 file changed, 2 insertions(+), 170 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 257e0e8820ec..b84912ff1711 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -867,174 +867,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, return 0; } - -/** - * Save HW states of giving crtc - */ -static void cdv_intel_crtc_save(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; - uint32_t paletteReg; - int i; - - if (!crtc_state) { - dev_dbg(dev->dev, "No CRTC state found\n"); - return; - } - - crtc_state->saveDSPCNTR = REG_READ(map->cntr); - crtc_state->savePIPECONF = REG_READ(map->conf); - crtc_state->savePIPESRC = REG_READ(map->src); - crtc_state->saveFP0 = REG_READ(map->fp0); - crtc_state->saveFP1 = REG_READ(map->fp1); - crtc_state->saveDPLL = REG_READ(map->dpll); - crtc_state->saveHTOTAL = REG_READ(map->htotal); - crtc_state->saveHBLANK = 
REG_READ(map->hblank); - crtc_state->saveHSYNC = REG_READ(map->hsync); - crtc_state->saveVTOTAL = REG_READ(map->vtotal); - crtc_state->saveVBLANK = REG_READ(map->vblank); - crtc_state->saveVSYNC = REG_READ(map->vsync); - crtc_state->saveDSPSTRIDE = REG_READ(map->stride); - - /*NOTE: DSPSIZE DSPPOS only for psb*/ - crtc_state->saveDSPSIZE = REG_READ(map->size); - crtc_state->saveDSPPOS = REG_READ(map->pos); - - crtc_state->saveDSPBASE = REG_READ(map->base); - - DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", - crtc_state->saveDSPCNTR, - crtc_state->savePIPECONF, - crtc_state->savePIPESRC, - crtc_state->saveFP0, - crtc_state->saveFP1, - crtc_state->saveDPLL, - crtc_state->saveHTOTAL, - crtc_state->saveHBLANK, - crtc_state->saveHSYNC, - crtc_state->saveVTOTAL, - crtc_state->saveVBLANK, - crtc_state->saveVSYNC, - crtc_state->saveDSPSTRIDE, - crtc_state->saveDSPSIZE, - crtc_state->saveDSPPOS, - crtc_state->saveDSPBASE - ); - - paletteReg = map->palette; - for (i = 0; i < 256; ++i) - crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); -} - -/** - * Restore HW states of giving crtc - */ -static void cdv_intel_crtc_restore(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; - uint32_t paletteReg; - int i; - - if (!crtc_state) { - dev_dbg(dev->dev, "No crtc state\n"); - return; - } - - DRM_DEBUG( - "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", - REG_READ(map->cntr), - REG_READ(map->conf), - REG_READ(map->src), - REG_READ(map->fp0), - REG_READ(map->fp1), - REG_READ(map->dpll), - REG_READ(map->htotal), - REG_READ(map->hblank), - REG_READ(map->hsync), - REG_READ(map->vtotal), - REG_READ(map->vblank), - REG_READ(map->vsync), - REG_READ(map->stride), - REG_READ(map->size), - REG_READ(map->pos), - REG_READ(map->base) - ); - - DRM_DEBUG( - "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", - crtc_state->saveDSPCNTR, - crtc_state->savePIPECONF, - crtc_state->savePIPESRC, - crtc_state->saveFP0, - crtc_state->saveFP1, - crtc_state->saveDPLL, - crtc_state->saveHTOTAL, - crtc_state->saveHBLANK, - crtc_state->saveHSYNC, - crtc_state->saveVTOTAL, - crtc_state->saveVBLANK, - crtc_state->saveVSYNC, - crtc_state->saveDSPSTRIDE, - crtc_state->saveDSPSIZE, - crtc_state->saveDSPPOS, - crtc_state->saveDSPBASE - ); - - - if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { - REG_WRITE(map->dpll, - crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); - REG_READ(map->dpll); - DRM_DEBUG("write dpll: %x\n", - REG_READ(map->dpll)); - udelay(150); - } - - REG_WRITE(map->fp0, crtc_state->saveFP0); - REG_READ(map->fp0); - - REG_WRITE(map->fp1, crtc_state->saveFP1); - REG_READ(map->fp1); - - REG_WRITE(map->dpll, crtc_state->saveDPLL); - REG_READ(map->dpll); - udelay(150); - - REG_WRITE(map->htotal, crtc_state->saveHTOTAL); - REG_WRITE(map->hblank, crtc_state->saveHBLANK); - REG_WRITE(map->hsync, crtc_state->saveHSYNC); - REG_WRITE(map->vtotal, crtc_state->saveVTOTAL); - REG_WRITE(map->vblank, crtc_state->saveVBLANK); - REG_WRITE(map->vsync, crtc_state->saveVSYNC); - REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE); - - REG_WRITE(map->size, crtc_state->saveDSPSIZE); - REG_WRITE(map->pos, crtc_state->saveDSPPOS); - - REG_WRITE(map->src, crtc_state->savePIPESRC); - REG_WRITE(map->base, crtc_state->saveDSPBASE); - 
REG_WRITE(map->conf, crtc_state->savePIPECONF);
-
-        gma_wait_for_vblank(dev);
-
-        REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
-        REG_WRITE(map->base, crtc_state->saveDSPBASE);
-
-        gma_wait_for_vblank(dev);
-
-        paletteReg = map->palette;
-        for (i = 0; i < 256; ++i)
-                REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
-}
-
 static int cdv_crtc_set_config(struct drm_mode_set *set)
 {
         int ret = 0;
@@ -1203,8 +1035,8 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
 };
 
 const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
-        .save = cdv_intel_crtc_save,
-        .restore = cdv_intel_crtc_restore,
+        .save = gma_crtc_save,
+        .restore = gma_crtc_restore,
         .cursor_set = gma_crtc_cursor_set,
         .cursor_move = gma_crtc_cursor_move,
         .gamma_set = gma_crtc_gamma_set,
-- cgit v1.2.3

From 924cb5ffd81d66cc6461de955f7cb144cb3b7b6d Mon Sep 17 00:00:00 2001
From: Patrik Jakobsson
Date: Fri, 12 Jul 2013 15:38:52 +0200
Subject: drm/gma500: Add generic set_config() function

Signed-off-by: Patrik Jakobsson
---
 drivers/gpu/drm/gma500/gma_display.c | 16 ++++++++++++++++
 drivers/gpu/drm/gma500/gma_display.h |  3 +++
 2 files changed, 19 insertions(+)

diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index cca40c0b64e4..2a3e9254e651 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -519,6 +519,22 @@ void gma_crtc_destroy(struct drm_crtc *crtc)
         kfree(psb_intel_crtc);
 }
 
+int gma_crtc_set_config(struct drm_mode_set *set)
+{
+        struct drm_device *dev = set->crtc->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
+        int ret;
+
+        if (!dev_priv->rpm_enabled)
+                return drm_crtc_helper_set_config(set);
+
+        pm_runtime_forbid(&dev->pdev->dev);
+        ret = drm_crtc_helper_set_config(set);
+        pm_runtime_allow(&dev->pdev->dev);
+
+        return ret;
+}
+
 /**
  * Save HW states of given crtc
  */
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 8847de069516..1044c165c714 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -22,6 +22,8 @@
 #ifndef _GMA_DISPLAY_H_
 #define _GMA_DISPLAY_H_
 
+#include
+
 struct gma_clock_t {
         /* given values */
         int n;
@@ -80,6 +82,7 @@ extern void gma_crtc_prepare(struct drm_crtc *crtc);
 extern void gma_crtc_commit(struct drm_crtc *crtc);
 extern void gma_crtc_disable(struct drm_crtc *crtc);
 extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_set_config(struct drm_mode_set *set);
 
 extern void gma_crtc_save(struct drm_crtc *crtc);
 extern void gma_crtc_restore(struct drm_crtc *crtc);
-- cgit v1.2.3

From 43a83027d4705bb6b6506f9467c9c4d3e2a1b504 Mon Sep 17 00:00:00 2001
From: Patrik Jakobsson
Date: Fri, 12 Jul 2013 15:41:36 +0200
Subject: drm/gma500/psb: Convert to generic set_config()

Signed-off-by: Patrik Jakobsson
---
 drivers/gpu/drm/gma500/psb_intel_display.c | 18 +-----------------
 1 file changed, 1 insertion(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 317c585e8540..05faf1c1ff06 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,7 +19,6 @@
  */
 
 #include
-#include
 #include
 
 #include "framebuffer.h"
@@ -305,21 +304,6 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
         return 0;
 }
 
-static int psb_crtc_set_config(struct drm_mode_set *set)
-{
-        int ret;
-        struct drm_device *dev = set->crtc->dev;
-        struct drm_psb_private *dev_priv = dev->dev_private;
-
-        if 
(!dev_priv->rpm_enabled) - return drm_crtc_helper_set_config(set); - - pm_runtime_forbid(&dev->pdev->dev); - ret = drm_crtc_helper_set_config(set); - pm_runtime_allow(&dev->pdev->dev); - return ret; -} - /* Returns the clock of the currently programmed mode of the given pipe. */ static int psb_intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) @@ -460,7 +444,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = { .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, - .set_config = psb_crtc_set_config, + .set_config = gma_crtc_set_config, .destroy = gma_crtc_destroy, }; -- cgit v1.2.3 From c5c81f4e1bc9c8ee1c3637de51ee180efbbf629c Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Fri, 12 Jul 2013 15:43:54 +0200 Subject: drm/gma500/cdv: Convert to generic set_config() Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_display.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index b84912ff1711..6bca1fe470ab 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -19,7 +19,6 @@ */ #include -#include #include #include "framebuffer.h" @@ -867,24 +866,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, return 0; } -static int cdv_crtc_set_config(struct drm_mode_set *set) -{ - int ret = 0; - struct drm_device *dev = set->crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - if (!dev_priv->rpm_enabled) - return drm_crtc_helper_set_config(set); - - pm_runtime_forbid(&dev->pdev->dev); - - ret = drm_crtc_helper_set_config(set); - - pm_runtime_allow(&dev->pdev->dev); - - return ret; -} - /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ /* FIXME: why are we using this, should it be cdv_ in this tree ? 
*/ @@ -1040,7 +1021,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = { .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, - .set_config = cdv_crtc_set_config, + .set_config = gma_crtc_set_config, .destroy = gma_crtc_destroy, }; -- cgit v1.2.3 From 6306865daf0283d1b13adea8be8d1ad4dd0ea1c3 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Mon, 22 Jul 2013 01:31:23 +0200 Subject: drm/gma500: Rename psb_intel_crtc to gma_crtc The psb_intel_crtc is generic and should be named appropriately Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_crt.c | 7 +- drivers/gpu/drm/gma500/cdv_intel_display.c | 28 +++---- drivers/gpu/drm/gma500/cdv_intel_dp.c | 10 +-- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6 +- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 8 +- drivers/gpu/drm/gma500/framebuffer.c | 16 ++-- drivers/gpu/drm/gma500/gma_display.c | 105 +++++++++++++-------------- drivers/gpu/drm/gma500/mdfld_dsi_output.c | 15 ++-- drivers/gpu/drm/gma500/mdfld_intel_display.c | 16 ++-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 16 ++-- drivers/gpu/drm/gma500/psb_drv.c | 6 +- drivers/gpu/drm/gma500/psb_intel_display.c | 98 ++++++++++++------------- drivers/gpu/drm/gma500/psb_intel_drv.h | 6 +- drivers/gpu/drm/gma500/psb_intel_lvds.c | 10 +-- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 4 +- 15 files changed, 170 insertions(+), 181 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 0cfcb26fb2a1..79ad19696f71 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; - struct psb_intel_crtc *psb_intel_crtc = - to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int dpll_md_reg; u32 adpa, dpll_md; u32 adpa_reg; - if (psb_intel_crtc->pipe == 0) + if (gma_crtc->pipe == 0) dpll_md_reg = DPLL_A_MD; else dpll_md_reg = DPLL_B_MD; @@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; - if (psb_intel_crtc->pipe == 0) + if (gma_crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; else adpa |= ADPA_PIPE_B_SELECT; diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 6bca1fe470ab..e18c3b9fd07f 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -222,8 +222,8 @@ static int cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, struct gma_clock_t *clock, bool is_lvds, u32 ddi_select) { - struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; u32 m, n_vco, p; int ret = 0; int dpll_reg = (pipe == 0) ? 
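
The rename that begins here is mechanical: every to_psb_intel_crtc()/struct psb_intel_crtc pair becomes to_gma_crtc()/struct gma_crtc. The cast helper itself is not part of the hunks shown; it is assumed to be the usual container_of() wrapper, roughly as sketched below, and example_crtc_pipe() is only an illustration of a typical call site after the conversion.

/* Assumed shape of the helper (it lives in psb_intel_drv.h, not shown here). */
#define to_gma_crtc(x)  container_of(x, struct gma_crtc, base)

static int example_crtc_pipe(struct drm_crtc *crtc)
{
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);  /* was to_psb_intel_crtc() */

        return gma_crtc->pipe;
}
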
DPLL_A : DPLL_B; @@ -458,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe) { struct drm_crtc *crtc; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = NULL; + struct gma_crtc *gma_crtc = NULL; crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - psb_intel_crtc = to_psb_intel_crtc(crtc); + gma_crtc = to_gma_crtc(crtc); - if (crtc->fb == NULL || !psb_intel_crtc->active) + if (crtc->fb == NULL || !gma_crtc->active) return false; return true; } @@ -489,11 +489,11 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev) static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; - if (psb_intel_crtc->pipe != 1) + if (gma_crtc->pipe != 1) return false; list_for_each_entry(connector, &mode_config->connector_list, head) { @@ -616,8 +616,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk; struct gma_clock_t clock; @@ -693,7 +693,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, drm_mode_debug_printmodeline(adjusted_mode); - limit = psb_intel_crtc->clock_funcs->limit(crtc, refclk); + limit = gma_crtc->clock_funcs->limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); @@ -883,8 +883,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 dpll; u32 fp; @@ -961,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev, struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; const struct psb_offset *map = &dev_priv->regmap[pipe]; diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index e3907becf8c9..a90adf629925 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -793,10 +793,10 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_psb_private *dev_priv = dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int lane_count = 4, bpp = 24; struct cdv_intel_dp_m_n m_n; - int pipe = intel_crtc->pipe; + int pipe = gma_crtc->pipe; /* * Find the lane count in the intel_encoder private @@ -844,7 +844,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode { 
struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); struct drm_crtc *crtc = encoder->crtc; - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; struct drm_device *dev = encoder->dev; @@ -886,7 +886,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode } /* CPT DP's pipe select is decided in TRANS_DP_CTL */ - if (intel_crtc->pipe == 1) + if (gma_crtc->pipe == 1) intel_dp->DP |= DP_PIPEB_SELECT; REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); @@ -901,7 +901,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode else pfit_control = 0; - pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; + pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT; REG_WRITE(PFIT_CONTROL, pfit_control); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index a849e912f30a..5c3b3eabb5e9 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -68,7 +68,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder, struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; u32 hdmib; struct drm_crtc *crtc = encoder->crtc; - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); hdmib = (2 << 10); @@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmib |= HDMI_HSYNC_ACTIVE_HIGH; - if (intel_crtc->pipe == 1) + if (gma_crtc->pipe == 1) hdmib |= HDMIB_PIPE_B_SELECT; if (hdmi_priv->has_hdmi_audio) { @@ -167,7 +167,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector, struct drm_encoder *encoder = connector->encoder; if (!strcmp(property->name, "scaling mode") && encoder) { - struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); + struct gma_crtc *crtc = to_gma_crtc(encoder->crtc); bool centre; uint64_t curValue; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index e461dce1d088..53f1d8f5e9cd 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc( - encoder->crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc); u32 pfit_control; /* @@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder, else pfit_control = 0; - pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; + pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT; if (dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; @@ -461,8 +460,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, struct drm_encoder *encoder = connector->encoder; if (!strcmp(property->name, "scaling mode") && encoder) { - struct psb_intel_crtc *crtc = - to_psb_intel_crtc(encoder->crtc); + struct gma_crtc *crtc = to_gma_crtc(encoder->crtc); uint64_t curValue; if (!crtc) diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index e0e7eb0680e6..6ebabb5ca3c2 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -520,21 +520,21 @@ static struct drm_framebuffer 
*psb_user_framebuffer_create static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - intel_crtc->lut_r[regno] = red >> 8; - intel_crtc->lut_g[regno] = green >> 8; - intel_crtc->lut_b[regno] = blue >> 8; + gma_crtc->lut_r[regno] = red >> 8; + gma_crtc->lut_g[regno] = green >> 8; + gma_crtc->lut_b[regno] = blue >> 8; } static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - *red = intel_crtc->lut_r[regno] << 8; - *green = intel_crtc->lut_g[regno] << 8; - *blue = intel_crtc->lut_b[regno] << 8; + *red = gma_crtc->lut_r[regno] << 8; + *green = gma_crtc->lut_g[regno] << 8; + *blue = gma_crtc->lut_b[regno] << 8; } static int psbfb_probe(struct drm_fb_helper *helper, diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 2a3e9254e651..b87180344ad6 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -58,9 +58,9 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; u32 dspcntr; @@ -140,8 +140,8 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; int palreg = map->palette; int i; @@ -152,24 +152,24 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) if (gma_power_begin(dev, false)) { for (i = 0; i < 256; i++) { REG_WRITE(palreg + 4 * i, - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i])); + ((gma_crtc->lut_r[i] + + gma_crtc->lut_adj[i]) << 16) | + ((gma_crtc->lut_g[i] + + gma_crtc->lut_adj[i]) << 8) | + (gma_crtc->lut_b[i] + + gma_crtc->lut_adj[i])); } gma_power_end(dev); } else { for (i = 0; i < 256; i++) { /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */ dev_priv->regs.pipe[0].palette[i] = - ((psb_intel_crtc->lut_r[i] + - psb_intel_crtc->lut_adj[i]) << 16) | - ((psb_intel_crtc->lut_g[i] + - psb_intel_crtc->lut_adj[i]) << 8) | - (psb_intel_crtc->lut_b[i] + - psb_intel_crtc->lut_adj[i]); + ((gma_crtc->lut_r[i] + + gma_crtc->lut_adj[i]) << 16) | + ((gma_crtc->lut_g[i] + + gma_crtc->lut_adj[i]) << 8) | + (gma_crtc->lut_b[i] + + gma_crtc->lut_adj[i]); } } @@ -178,14 +178,14 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, u32 start, u32 size) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int i; int end = (start + size > 256) ? 
256 : start + size; for (i = start; i < end; i++) { - psb_intel_crtc->lut_r[i] = red[i] >> 8; - psb_intel_crtc->lut_g[i] = green[i] >> 8; - psb_intel_crtc->lut_b[i] = blue[i] >> 8; + gma_crtc->lut_r[i] = red[i] >> 8; + gma_crtc->lut_g[i] = green[i] >> 8; + gma_crtc->lut_b[i] = blue[i] >> 8; } gma_crtc_load_lut(crtc); @@ -201,8 +201,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 temp; @@ -217,10 +217,10 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: - if (psb_intel_crtc->active) + if (gma_crtc->active) break; - psb_intel_crtc->active = true; + gma_crtc->active = true; /* Enable the DPLL */ temp = REG_READ(map->dpll); @@ -268,10 +268,10 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) /* psb_intel_crtc_dpms_video(crtc, true); TODO */ break; case DRM_MODE_DPMS_OFF: - if (!psb_intel_crtc->active) + if (!gma_crtc->active) break; - psb_intel_crtc->active = false; + gma_crtc->active = false; /* Give the overlay scaler a chance to disable * if it's on this pipe */ @@ -334,14 +334,14 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? 
CURABASE : CURBBASE; uint32_t temp; size_t addr = 0; struct gtt_range *gt; - struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; + struct gtt_range *cursor_gt = gma_crtc->cursor_gt; struct drm_gem_object *obj; void *tmp_dst, *tmp_src; int ret = 0, i, cursor_pages; @@ -357,12 +357,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, } /* Unpin the old GEM object */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, + if (gma_crtc->cursor_obj) { + gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = NULL; + drm_gem_object_unreference(gma_crtc->cursor_obj); + gma_crtc->cursor_obj = NULL; } return 0; @@ -415,10 +415,10 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, tmp_dst += PAGE_SIZE; } - addr = psb_intel_crtc->cursor_addr; + addr = gma_crtc->cursor_addr; } else { addr = gt->offset; - psb_intel_crtc->cursor_addr = addr; + gma_crtc->cursor_addr = addr; } temp = 0; @@ -433,14 +433,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, } /* unpin the old bo */ - if (psb_intel_crtc->cursor_obj) { - gt = container_of(psb_intel_crtc->cursor_obj, - struct gtt_range, gem); + if (gma_crtc->cursor_obj) { + gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); - drm_gem_object_unreference(psb_intel_crtc->cursor_obj); + drm_gem_object_unreference(gma_crtc->cursor_obj); } - psb_intel_crtc->cursor_obj = obj; + gma_crtc->cursor_obj = obj; return ret; unref_cursor: @@ -451,8 +450,8 @@ unref_cursor: int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; uint32_t temp = 0; uint32_t addr; @@ -468,7 +467,7 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); - addr = psb_intel_crtc->cursor_addr; + addr = gma_crtc->cursor_addr; if (gma_power_begin(dev, false)) { REG_WRITE((pipe == 0) ? 
CURAPOS : CURBPOS, temp); @@ -512,11 +511,11 @@ void gma_crtc_disable(struct drm_crtc *crtc) void gma_crtc_destroy(struct drm_crtc *crtc) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - kfree(psb_intel_crtc->crtc_state); + kfree(gma_crtc->crtc_state); drm_crtc_cleanup(crtc); - kfree(psb_intel_crtc); + kfree(gma_crtc); } int gma_crtc_set_config(struct drm_mode_set *set) @@ -542,9 +541,9 @@ void gma_crtc_save(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state; + const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; uint32_t palette_reg; int i; @@ -585,9 +584,9 @@ void gma_crtc_restore(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; - const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe]; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state; + const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; uint32_t palette_reg; int i; @@ -720,7 +719,7 @@ bool gma_find_best_pll(const struct gma_limit_t *limit, { struct drm_device *dev = crtc->dev; const struct gma_clock_funcs *clock_funcs = - to_psb_intel_crtc(crtc)->clock_funcs; + to_gma_crtc(crtc)->clock_funcs; struct gma_clock_t clock; int err = target; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 3abf8315f57c..860a4ee9baaf 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, struct drm_encoder *encoder = connector->encoder; if (!strcmp(property->name, "scaling mode") && encoder) { - struct psb_intel_crtc *psb_crtc = - to_psb_intel_crtc(encoder->crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc); bool centerechange; uint64_t val; - if (!psb_crtc) + if (!gma_crtc) goto set_prop_error; switch (value) { @@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || (value == DRM_MODE_SCALE_NO_SCALE); - if (psb_crtc->saved_mode.hdisplay != 0 && - psb_crtc->saved_mode.vdisplay != 0) { + if (gma_crtc->saved_mode.hdisplay != 0 && + gma_crtc->saved_mode.vdisplay != 0) { if (centerechange) { if (!drm_crtc_helper_set_mode(encoder->crtc, - &psb_crtc->saved_mode, + &gma_crtc->saved_mode, encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb)) @@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, struct drm_encoder_helper_funcs *funcs = encoder->helper_private; funcs->mode_set(encoder, - &psb_crtc->saved_mode, - &psb_crtc->saved_adjusted_mode); + &gma_crtc->saved_mode, + &gma_crtc->saved_adjusted_mode); } } } else if (!strcmp(property->name, "backlight") && encoder) { diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c 
b/drivers/gpu/drm/gma500/mdfld_intel_display.c index e5d3a02ac650..adb29e370ce5 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -165,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; u32 dspcntr; @@ -305,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 pipeconf = dev_priv->pipeconf[pipe]; u32 temp; @@ -669,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_psb_private *dev_priv = dev->dev_private; - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk = 0; int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, @@ -730,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, if (!gma_power_begin(dev, true)) return 0; - memcpy(&psb_intel_crtc->saved_mode, mode, + memcpy(&gma_crtc->saved_mode, mode, sizeof(struct drm_display_mode)); - memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, + memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode)); list_for_each_entry(connector, &mode_config->connector_list, head) { diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index b2744293c5c8..284e62ae7b57 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 temp; @@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_psb_private *dev_priv = dev->dev_private; - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk = 0; struct oaktrail_clock_t clock; @@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, if (!gma_power_begin(dev, true)) return 0; - memcpy(&psb_intel_crtc->saved_mode, + memcpy(&gma_crtc->saved_mode, mode, sizeof(struct drm_display_mode)); - memcpy(&psb_intel_crtc->saved_adjusted_mode, + 
memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode)); @@ -499,9 +499,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index ab8b9ef86c37..6c2d139aab8b 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data, struct drm_mode_object *obj; struct drm_crtc *crtc; struct drm_connector *connector; - struct psb_intel_crtc *psb_intel_crtc; + struct gma_crtc *gma_crtc; int i = 0; int32_t obj_id; @@ -454,10 +454,10 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data, connector = obj_to_connector(obj); crtc = connector->encoder->crtc; - psb_intel_crtc = to_psb_intel_crtc(crtc); + gma_crtc = to_gma_crtc(crtc); for (i = 0; i < 256; i++) - psb_intel_crtc->lut_adj[i] = lut_arg->lut[i]; + gma_crtc->lut_adj[i] = lut_arg->lut[i]; gma_crtc_load_lut(crtc); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 05faf1c1ff06..c665c91dabde 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -106,9 +106,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk; struct gma_clock_t clock; @@ -148,7 +148,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, refclk = 96000; - limit = psb_intel_crtc->clock_funcs->limit(crtc, refclk); + limit = gma_crtc->clock_funcs->limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); @@ -308,9 +308,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, static int psb_intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_psb_private *dev_priv = dev->dev_private; - int pipe = psb_intel_crtc->pipe; + int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 dpll; u32 fp; @@ -384,8 +384,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev, struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - int pipe = psb_intel_crtc->pipe; + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + int pipe = gma_crtc->pipe; struct drm_display_mode *mode; int htot; int hsync; @@ -459,7 +459,7 @@ const struct gma_clock_funcs psb_clock_funcs = { * to zero. 
This is a workaround for h/w defect on Oaktrail */ static void psb_intel_cursor_init(struct drm_device *dev, - struct psb_intel_crtc *psb_intel_crtc) + struct gma_crtc *gma_crtc) { struct drm_psb_private *dev_priv = dev->dev_private; u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; @@ -472,91 +472,87 @@ static void psb_intel_cursor_init(struct drm_device *dev, */ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); if (!cursor_gt) { - psb_intel_crtc->cursor_gt = NULL; + gma_crtc->cursor_gt = NULL; goto out; } - psb_intel_crtc->cursor_gt = cursor_gt; - psb_intel_crtc->cursor_addr = dev_priv->stolen_base + + gma_crtc->cursor_gt = cursor_gt; + gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_gt->offset; } else { - psb_intel_crtc->cursor_gt = NULL; + gma_crtc->cursor_gt = NULL; } out: - REG_WRITE(control[psb_intel_crtc->pipe], 0); - REG_WRITE(base[psb_intel_crtc->pipe], 0); + REG_WRITE(control[gma_crtc->pipe], 0); + REG_WRITE(base[gma_crtc->pipe], 0); } void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct psb_intel_mode_device *mode_dev) { struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_crtc *psb_intel_crtc; + struct gma_crtc *gma_crtc; int i; uint16_t *r_base, *g_base, *b_base; /* We allocate a extra array of drm_connector pointers * for fbdev after the crtc */ - psb_intel_crtc = - kzalloc(sizeof(struct psb_intel_crtc) + - (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), - GFP_KERNEL); - if (psb_intel_crtc == NULL) + gma_crtc = kzalloc(sizeof(struct gma_crtc) + + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), + GFP_KERNEL); + if (gma_crtc == NULL) return; - psb_intel_crtc->crtc_state = + gma_crtc->crtc_state = kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); - if (!psb_intel_crtc->crtc_state) { + if (!gma_crtc->crtc_state) { dev_err(dev->dev, "Crtc state error: No memory\n"); - kfree(psb_intel_crtc); + kfree(gma_crtc); return; } /* Set the CRTC operations from the chip specific data */ - drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs); + drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs); /* Set the CRTC clock functions from chip specific data */ - psb_intel_crtc->clock_funcs = dev_priv->ops->clock_funcs; + gma_crtc->clock_funcs = dev_priv->ops->clock_funcs; - drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); - psb_intel_crtc->pipe = pipe; - psb_intel_crtc->plane = pipe; + drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256); + gma_crtc->pipe = pipe; + gma_crtc->plane = pipe; - r_base = psb_intel_crtc->base.gamma_store; + r_base = gma_crtc->base.gamma_store; g_base = r_base + 256; b_base = g_base + 256; for (i = 0; i < 256; i++) { - psb_intel_crtc->lut_r[i] = i; - psb_intel_crtc->lut_g[i] = i; - psb_intel_crtc->lut_b[i] = i; + gma_crtc->lut_r[i] = i; + gma_crtc->lut_g[i] = i; + gma_crtc->lut_b[i] = i; r_base[i] = i << 8; g_base[i] = i << 8; b_base[i] = i << 8; - psb_intel_crtc->lut_adj[i] = 0; + gma_crtc->lut_adj[i] = 0; } - psb_intel_crtc->mode_dev = mode_dev; - psb_intel_crtc->cursor_addr = 0; + gma_crtc->mode_dev = mode_dev; + gma_crtc->cursor_addr = 0; - drm_crtc_helper_add(&psb_intel_crtc->base, + drm_crtc_helper_add(&gma_crtc->base, dev_priv->ops->crtc_helper); /* Setup the array of drm_connector pointer array */ - psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; + gma_crtc->mode_set.crtc = &gma_crtc->base; BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || - dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); - 
dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = - &psb_intel_crtc->base; - dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = - &psb_intel_crtc->base; - psb_intel_crtc->mode_set.connectors = - (struct drm_connector **) (psb_intel_crtc + 1); - psb_intel_crtc->mode_set.num_connectors = 0; - psb_intel_cursor_init(dev, psb_intel_crtc); + dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL); + dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base; + dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base; + gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1); + gma_crtc->mode_set.num_connectors = 0; + psb_intel_cursor_init(dev, gma_crtc); /* Set to true so that the pipe is forced off on initial config. */ - psb_intel_crtc->active = true; + gma_crtc->active = true; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -565,7 +561,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; struct drm_mode_object *drmmode_obj; - struct psb_intel_crtc *crtc; + struct gma_crtc *crtc; if (!dev_priv) { dev_err(dev->dev, "called with no initialization\n"); @@ -580,7 +576,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return -EINVAL; } - crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); + crtc = to_gma_crtc(obj_to_crtc(drmmode_obj)); pipe_from_crtc_id->pipe = crtc->pipe; return 0; @@ -591,8 +587,8 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) struct drm_crtc *crtc = NULL; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); - if (psb_intel_crtc->pipe == pipe) + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + if (gma_crtc->pipe == pipe) break; } return crtc; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 1e1bf8bbfe5a..2a73b915347d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -162,7 +162,7 @@ struct psb_intel_crtc_state { uint32_t savePalette[256]; }; -struct psb_intel_crtc { +struct gma_crtc { struct drm_crtc base; int pipe; int plane; @@ -193,8 +193,8 @@ struct psb_intel_crtc { const struct gma_clock_funcs *clock_funcs; }; -#define to_psb_intel_crtc(x) \ - container_of(x, struct psb_intel_crtc, base) +#define to_gma_crtc(x) \ + container_of(x, struct gma_crtc, base) #define to_psb_intel_connector(x) \ container_of(x, struct psb_intel_connector, base) #define to_psb_intel_encoder(x) \ diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 42541e8ea0ad..861e2e5900f1 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -381,8 +381,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; - struct psb_intel_crtc *psb_intel_crtc = - to_psb_intel_crtc(encoder->crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; struct psb_intel_encoder *psb_intel_encoder = @@ -392,11 +391,11 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, panel_fixed_mode = 
mode_dev->panel_fixed_mode2; /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ - if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) { + if (!IS_MRST(dev) && gma_crtc->pipe == 0) { printk(KERN_ERR "Can't support LVDS on pipe A\n"); return false; } - if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { + if (IS_MRST(dev) && gma_crtc->pipe != 0) { printk(KERN_ERR "Must use PIPE A\n"); return false; } @@ -585,8 +584,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, return -1; if (!strcmp(property->name, "scaling mode")) { - struct psb_intel_crtc *crtc = - to_psb_intel_crtc(encoder->crtc); + struct gma_crtc *crtc = to_gma_crtc(encoder->crtc); uint64_t curval; if (!crtc) diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 631e01abc04b..730f3071f188 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -987,7 +987,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; - struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); u32 sdvox; struct psb_intel_sdvo_in_out_map in_out; @@ -1070,7 +1070,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder, } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; - if (psb_intel_crtc->pipe == 1) + if (gma_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; if (psb_intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; -- cgit v1.2.3 From a3d5d75f694396aa574c4dadbd6008e2cc9a2bbb Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Mon, 22 Jul 2013 17:05:25 +0200 Subject: drm/gma500: Rename psb_intel_connector to gma_connector The psb_intel_connector is generic and should be named appropriately Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_crt.c | 14 +++++++------- drivers/gpu/drm/gma500/cdv_intel_dp.c | 16 ++++++++-------- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 12 ++++++------ drivers/gpu/drm/gma500/cdv_intel_lvds.c | 12 ++++++------ drivers/gpu/drm/gma500/framebuffer.c | 2 +- drivers/gpu/drm/gma500/framebuffer.h | 2 +- drivers/gpu/drm/gma500/gma_display.c | 2 +- drivers/gpu/drm/gma500/mdfld_dsi_output.h | 8 ++++---- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 10 +++++----- drivers/gpu/drm/gma500/oaktrail_lvds.c | 12 ++++++------ drivers/gpu/drm/gma500/psb_intel_display.c | 2 +- drivers/gpu/drm/gma500/psb_intel_drv.h | 10 +++++----- drivers/gpu/drm/gma500/psb_intel_lvds.c | 15 +++++++-------- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 12 ++++++------ 14 files changed, 64 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 79ad19696f71..b2661f3a3047 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -259,7 +259,7 @@ void cdv_intel_crt_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct psb_intel_encoder *psb_intel_encoder; struct drm_connector *connector; struct drm_encoder *encoder; @@ -270,11 +270,11 @@ void cdv_intel_crt_init(struct drm_device *dev, if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) + gma_connector = kzalloc(sizeof(struct gma_connector), 
GFP_KERNEL); + if (!gma_connector) goto failed_connector; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; connector->polled = DRM_CONNECTOR_POLL_HPD; drm_connector_init(dev, connector, &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); @@ -283,7 +283,7 @@ void cdv_intel_crt_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); /* Set up the DDC bus. */ i2c_reg = GPIOA; @@ -317,8 +317,8 @@ void cdv_intel_crt_init(struct drm_device *dev, return; failed_ddc: drm_encoder_cleanup(&psb_intel_encoder->base); - drm_connector_cleanup(&psb_intel_connector->base); - kfree(psb_intel_connector); + drm_connector_cleanup(&gma_connector->base); + kfree(gma_connector); failed_connector: kfree(psb_intel_encoder); return; diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index a90adf629925..55de663c50b5 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -648,7 +648,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } static int -cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) +cdv_intel_dp_i2c_init(struct gma_connector *connector, struct psb_intel_encoder *encoder, const char *name) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; @@ -1803,7 +1803,7 @@ void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct cdv_intel_dp *intel_dp; @@ -1813,8 +1813,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); + if (!gma_connector) goto err_connector; intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); if (!intel_dp) @@ -1823,13 +1823,13 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) type = DRM_MODE_CONNECTOR_eDP; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); if (type == DRM_MODE_CONNECTOR_DisplayPort) psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; @@ -1864,7 +1864,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev cdv_disable_intel_clock_gating(dev); - cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); + cdv_intel_dp_i2c_init(gma_connector, psb_intel_encoder, name); /* FIXME:fail check */ cdv_intel_dp_add_properties(connector); @@ -1947,7 +1947,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev return; 
err_priv: - kfree(psb_intel_connector); + kfree(gma_connector); err_connector: kfree(psb_intel_encoder); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 5c3b3eabb5e9..b1290c303a58 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -295,7 +295,7 @@ void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int reg) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct mid_intel_hdmi_priv *hdmi_priv; @@ -307,10 +307,10 @@ void cdv_hdmi_init(struct drm_device *dev, if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); - if (!psb_intel_connector) + if (!gma_connector) goto err_connector; hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL); @@ -318,7 +318,7 @@ void cdv_hdmi_init(struct drm_device *dev, if (!hdmi_priv) goto err_priv; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; connector->polled = DRM_CONNECTOR_POLL_HPD; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, @@ -328,7 +328,7 @@ void cdv_hdmi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; hdmi_priv->hdmi_reg = reg; hdmi_priv->has_hdmi_sink = false; @@ -378,7 +378,7 @@ failed_ddc: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); err_priv: - kfree(psb_intel_connector); + kfree(gma_connector); err_connector: kfree(psb_intel_encoder); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 53f1d8f5e9cd..f4693ebfc098 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -611,7 +611,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct cdv_intel_lvds_priv *lvds_priv; struct drm_connector *connector; struct drm_encoder *encoder; @@ -633,9 +633,9 @@ void cdv_intel_lvds_init(struct drm_device *dev, if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); - if (!psb_intel_connector) + if (!gma_connector) goto failed_connector; lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL); @@ -644,7 +644,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, psb_intel_encoder->dev_priv = lvds_priv; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; encoder = &psb_intel_encoder->base; @@ -657,7 +657,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); @@ -791,7 +791,7 @@ failed_blc_i2c: drm_connector_cleanup(connector); kfree(lvds_priv); 
failed_lvds_priv: - kfree(psb_intel_connector); + kfree(gma_connector); failed_connector: kfree(psb_intel_encoder); } diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 6ebabb5ca3c2..bdc63c6ec5b8 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -744,7 +744,7 @@ static void psb_setup_outputs(struct drm_device *dev) } encoder->possible_crtcs = crtc_mask; encoder->possible_clones = - psb_intel_connector_clones(dev, clone_mask); + gma_connector_clones(dev, clone_mask); } } diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h index 989558a9e6ee..395f20b07aab 100644 --- a/drivers/gpu/drm/gma500/framebuffer.h +++ b/drivers/gpu/drm/gma500/framebuffer.h @@ -41,7 +41,7 @@ struct psb_fbdev { #define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) -extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); +extern int gma_connector_clones(struct drm_device *dev, int type_mask); #endif diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index b87180344ad6..eb460759f5d3 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -672,7 +672,7 @@ struct drm_encoder *gma_best_encoder(struct drm_connector *connector) return &psb_intel_encoder->base; } -void gma_connector_attach_encoder(struct psb_intel_connector *connector, +void gma_connector_attach_encoder(struct gma_connector *connector, struct psb_intel_encoder *encoder) { connector->encoder = encoder; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h index 36eb0744841c..d78b108e3f3a 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h @@ -227,7 +227,7 @@ enum { #define DSI_DPI_DISABLE_BTA BIT(3) struct mdfld_dsi_connector { - struct psb_intel_connector base; + struct gma_connector base; int pipe; void *private; @@ -269,11 +269,11 @@ struct mdfld_dsi_config { static inline struct mdfld_dsi_connector *mdfld_dsi_connector( struct drm_connector *connector) { - struct psb_intel_connector *psb_connector; + struct gma_connector *gma_connector; - psb_connector = to_psb_intel_connector(connector); + gma_connector = to_gma_connector(connector); - return container_of(psb_connector, struct mdfld_dsi_connector, base); + return container_of(gma_connector, struct mdfld_dsi_connector, base); } static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 059de197c942..846817fe95ce 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -641,7 +641,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; @@ -649,11 +649,11 @@ void oaktrail_hdmi_init(struct drm_device *dev, if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); + if (!gma_connector) goto failed_connector; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, 
&oaktrail_hdmi_connector_funcs, @@ -663,7 +663,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, &oaktrail_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 7b1cfd6d1606..9d037720eb9a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -326,7 +326,7 @@ void oaktrail_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_psb_private *dev_priv = dev->dev_private; @@ -338,11 +338,11 @@ void oaktrail_lvds_init(struct drm_device *dev, if (!psb_intel_encoder) return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); + if (!gma_connector) goto failed_connector; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; encoder = &psb_intel_encoder->base; dev_priv->is_lvds_on = true; drm_connector_init(dev, connector, @@ -352,7 +352,7 @@ void oaktrail_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); @@ -440,7 +440,7 @@ failed_find: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); - kfree(psb_intel_connector); + kfree(gma_connector); failed_connector: kfree(psb_intel_encoder); } diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index c665c91dabde..6cee07013a2d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -594,7 +594,7 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) return crtc; } -int psb_intel_connector_clones(struct drm_device *dev, int type_mask) +int gma_connector_clones(struct drm_device *dev, int type_mask) { int index_mask = 0; struct drm_connector *connector; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 2a73b915347d..9a34e85c7421 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -137,7 +137,7 @@ struct psb_intel_encoder { struct psb_intel_i2c_chan *ddc_bus; }; -struct psb_intel_connector { +struct gma_connector { struct drm_connector base; struct psb_intel_encoder *encoder; }; @@ -195,8 +195,8 @@ struct gma_crtc { #define to_gma_crtc(x) \ container_of(x, struct gma_crtc, base) -#define to_psb_intel_connector(x) \ - container_of(x, struct psb_intel_connector, base) +#define to_gma_connector(x) \ + container_of(x, struct gma_connector, base) #define to_psb_intel_encoder(x) \ container_of(x, struct psb_intel_encoder, base) #define to_psb_intel_framebuffer(x) \ @@ -227,13 +227,13 @@ extern void mid_dsi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int 
dsi_num); extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector); -extern void gma_connector_attach_encoder(struct psb_intel_connector *connector, +extern void gma_connector_attach_encoder(struct gma_connector *connector, struct psb_intel_encoder *encoder); static inline struct psb_intel_encoder *gma_attached_encoder( struct drm_connector *connector) { - return to_psb_intel_connector(connector)->encoder; + return to_gma_connector(connector)->encoder; } extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 861e2e5900f1..c08627554b50 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -690,7 +690,7 @@ void psb_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; + struct gma_connector *gma_connector; struct psb_intel_lvds_priv *lvds_priv; struct drm_connector *connector; struct drm_encoder *encoder; @@ -707,10 +707,9 @@ void psb_intel_lvds_init(struct drm_device *dev, return; } - psb_intel_connector = - kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) { - dev_err(dev->dev, "psb_intel_connector allocation error\n"); + gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); + if (!gma_connector) { + dev_err(dev->dev, "gma_connector allocation error\n"); goto failed_encoder; } @@ -722,7 +721,7 @@ void psb_intel_lvds_init(struct drm_device *dev, psb_intel_encoder->dev_priv = lvds_priv; - connector = &psb_intel_connector->base; + connector = &gma_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, @@ -732,7 +731,7 @@ void psb_intel_lvds_init(struct drm_device *dev, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); @@ -848,7 +847,7 @@ failed_blc_i2c: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); failed_connector: - kfree(psb_intel_connector); + kfree(gma_connector); failed_encoder: kfree(psb_intel_encoder); } diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 730f3071f188..7164c3c8f492 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -140,7 +140,7 @@ struct psb_intel_sdvo { }; struct psb_intel_sdvo_connector { - struct psb_intel_connector base; + struct gma_connector base; /* Mark the type of connector */ uint16_t output_flag; @@ -206,7 +206,7 @@ static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connecto static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector) { - return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base); + return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base); } static bool @@ -2074,7 +2074,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device) { struct drm_encoder *encoder = &psb_intel_sdvo->base.base; struct drm_connector *connector; - struct psb_intel_connector *intel_connector; + struct gma_connector *intel_connector; struct 
psb_intel_sdvo_connector *psb_intel_sdvo_connector; psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); @@ -2114,7 +2114,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type) { struct drm_encoder *encoder = &psb_intel_sdvo->base.base; struct drm_connector *connector; - struct psb_intel_connector *intel_connector; + struct gma_connector *intel_connector; struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); @@ -2153,7 +2153,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device) { struct drm_encoder *encoder = &psb_intel_sdvo->base.base; struct drm_connector *connector; - struct psb_intel_connector *intel_connector; + struct gma_connector *intel_connector; struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); @@ -2187,7 +2187,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device) { struct drm_encoder *encoder = &psb_intel_sdvo->base.base; struct drm_connector *connector; - struct psb_intel_connector *intel_connector; + struct gma_connector *intel_connector; struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); -- cgit v1.2.3 From 367e44080e20f77fa7b0f2db83fd6367da59b6c3 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Mon, 22 Jul 2013 17:45:26 +0200 Subject: drm/gma500: Rename psb_intel_encoder to gma_encoder The psb_intel_encoder is generic and should be named appropriately Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_intel_crt.c | 31 ++++--- drivers/gpu/drm/gma500/cdv_intel_display.c | 10 +-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 130 +++++++++++++-------------- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 66 ++++++-------- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 50 +++++------ drivers/gpu/drm/gma500/framebuffer.c | 7 +- drivers/gpu/drm/gma500/gma_display.c | 13 ++- drivers/gpu/drm/gma500/mdfld_dsi_output.h | 8 +- drivers/gpu/drm/gma500/mdfld_intel_display.c | 8 +- drivers/gpu/drm/gma500/oaktrail_crtc.c | 8 +- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 14 +-- drivers/gpu/drm/gma500/oaktrail_lvds.c | 37 ++++---- drivers/gpu/drm/gma500/psb_drv.c | 6 +- drivers/gpu/drm/gma500/psb_intel_display.c | 10 +-- drivers/gpu/drm/gma500/psb_intel_drv.h | 14 +-- drivers/gpu/drm/gma500/psb_intel_lvds.c | 49 +++++----- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 20 ++--- 17 files changed, 226 insertions(+), 255 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index b2661f3a3047..661af492173d 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -196,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect( static void cdv_intel_crt_destroy(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); + psb_intel_i2c_destroy(gma_encoder->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -207,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector) static int cdv_intel_crt_get_modes(struct drm_connector *connector) { - struct 
psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + return psb_intel_ddc_get_modes(connector, + &gma_encoder->ddc_bus->adapter); } static int cdv_intel_crt_set_property(struct drm_connector *connector, @@ -260,14 +259,14 @@ void cdv_intel_crt_init(struct drm_device *dev, { struct gma_connector *gma_connector; - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct drm_connector *connector; struct drm_encoder *encoder; u32 i2c_reg; - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); @@ -279,11 +278,11 @@ void cdv_intel_crt_init(struct drm_device *dev, drm_connector_init(dev, connector, &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_encoder_init(dev, encoder, &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, gma_encoder); /* Set up the DDC bus. */ i2c_reg = GPIOA; @@ -292,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev, if (dev_priv->crt_ddc_bus != 0) i2c_reg = dev_priv->crt_ddc_bus; }*/ - psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, + gma_encoder->ddc_bus = psb_intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); - if (!psb_intel_encoder->ddc_bus) { + if (!gma_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; } - psb_intel_encoder->type = INTEL_OUTPUT_ANALOG; + gma_encoder->type = INTEL_OUTPUT_ANALOG; /* psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT); psb_intel_output->crtc_mask = (1 << 0) | (1 << 1); @@ -316,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev, return; failed_ddc: - drm_encoder_cleanup(&psb_intel_encoder->base); + drm_encoder_cleanup(&gma_encoder->base); drm_connector_cleanup(&gma_connector->base); kfree(gma_connector); failed_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); return; } diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index e18c3b9fd07f..ee8a502348ce 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -497,14 +497,14 @@ static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) return false; list_for_each_entry(connector, &mode_config->connector_list, head) { - struct psb_intel_encoder *psb_intel_encoder = + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) continue; - if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS) + if (gma_encoder->type == INTEL_OUTPUT_LVDS) return true; } @@ -632,15 +632,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, bool is_edp = false; list_for_each_entry(connector, &mode_config->connector_list, head) { - struct psb_intel_encoder *psb_intel_encoder = + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) continue; - ddi_select = psb_intel_encoder->ddi_select; - switch (psb_intel_encoder->type) { + ddi_select = gma_encoder->ddi_select; 
+ switch (gma_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 55de663c50b5..f4eb43573cad 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -69,7 +69,7 @@ struct cdv_intel_dp { uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[4]; - struct psb_intel_encoder *encoder; + struct gma_encoder *encoder; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; uint8_t train_set[4]; @@ -115,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = { * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. */ -static bool is_edp(struct psb_intel_encoder *encoder) +static bool is_edp(struct gma_encoder *encoder) { return encoder->type == INTEL_OUTPUT_EDP; } -static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); -static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); -static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); +static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder); +static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder); +static void cdv_intel_dp_link_down(struct gma_encoder *encoder); static int -cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) +cdv_intel_dp_max_lane_count(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; int max_lane_count = 4; @@ -144,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) } static int -cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) +cdv_intel_dp_max_link_bw(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; @@ -181,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes) return (max_link_clock * max_lanes * 19) / 20; } -static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) +static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; @@ -201,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) msleep(intel_dp->panel_power_up_delay); } -static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) +static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; u32 pp; @@ -216,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) } /* Returns true if the panel was already on when called */ -static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) +static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; @@ -243,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) return false; } -static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) +static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; u32 pp, idle_off_mask = PP_ON ; @@ -275,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) DRM_DEBUG_KMS("Over\n"); } -static void cdv_intel_edp_backlight_on (struct 
psb_intel_encoder *intel_encoder) +static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; u32 pp; @@ -295,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) gma_backlight_enable(dev); } -static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) +static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; @@ -315,7 +315,7 @@ static int cdv_intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct psb_intel_encoder *encoder = gma_attached_encoder(connector); + struct gma_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); int max_lanes = cdv_intel_dp_max_lane_count(encoder); @@ -371,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) } static int -cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, +cdv_intel_dp_aux_ch(struct gma_encoder *encoder, uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { @@ -473,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, /* Write data to the aux channel in native mode */ static int -cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, +cdv_intel_dp_aux_native_write(struct gma_encoder *encoder, uint16_t address, uint8_t *send, int send_bytes) { int ret; @@ -505,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, /* Write a single byte to the aux channel in native mode */ static int -cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, +cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder, uint16_t address, uint8_t byte) { return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); @@ -513,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, /* read bytes from a native aux channel */ static int -cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, +cdv_intel_dp_aux_native_read(struct gma_encoder *encoder, uint16_t address, uint8_t *recv, int recv_bytes) { uint8_t msg[4]; @@ -558,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, struct cdv_intel_dp *intel_dp = container_of(adapter, struct cdv_intel_dp, adapter); - struct psb_intel_encoder *encoder = intel_dp->encoder; + struct gma_encoder *encoder = intel_dp->encoder; uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; @@ -648,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } static int -cdv_intel_dp_i2c_init(struct gma_connector *connector, struct psb_intel_encoder *encoder, const char *name) +cdv_intel_dp_i2c_init(struct gma_connector *connector, + struct gma_encoder *encoder, const char *name) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; @@ -699,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo struct drm_display_mode *adjusted_mode) { struct drm_psb_private *dev_priv = encoder->dev->dev_private; - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; int lane_count, clock; int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); @@ -802,13 +803,13 @@ 
cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, * Find the lane count in the intel_encoder private */ list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct psb_intel_encoder *intel_encoder; + struct gma_encoder *intel_encoder; struct cdv_intel_dp *intel_dp; if (encoder->crtc != crtc) continue; - intel_encoder = to_psb_intel_encoder(encoder); + intel_encoder = to_gma_encoder(encoder); intel_dp = intel_encoder->dev_priv; if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; @@ -842,7 +843,7 @@ static void cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); struct drm_crtc *crtc = encoder->crtc; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; @@ -909,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode /* If the sink supports it, try to set the power state appropriately */ -static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) +static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret, i; @@ -941,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) static void cdv_intel_dp_prepare(struct drm_encoder *encoder) { - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); int edp = is_edp(intel_encoder); if (edp) { @@ -958,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder) static void cdv_intel_dp_commit(struct drm_encoder *encoder) { - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); int edp = is_edp(intel_encoder); if (edp) @@ -972,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder) static void cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) { - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; struct drm_device *dev = encoder->dev; uint32_t dp_reg = REG_READ(intel_dp->output_reg); @@ -1007,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) * cases where the sink may still be asleep. 
*/ static bool -cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, +cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address, uint8_t *recv, int recv_bytes) { int ret, i; @@ -1032,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a * link status information */ static bool -cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) +cdv_intel_dp_get_link_status(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; return cdv_intel_dp_aux_native_read_retry(encoder, @@ -1106,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) } */ static void -cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) +cdv_intel_get_adjust_train(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; uint8_t v = 0; @@ -1165,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool -cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) +cdv_intel_channel_eq_ok(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; uint8_t lane_align; @@ -1185,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) } static bool -cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, +cdv_intel_dp_set_link_train(struct gma_encoder *encoder, uint32_t dp_reg_value, uint8_t dp_train_pat) { @@ -1212,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, static bool -cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, +cdv_intel_dplink_set_level(struct gma_encoder *encoder, uint8_t dp_train_pat) { @@ -1233,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, } static void -cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) +cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level) { struct drm_device *dev = encoder->base.dev; struct cdv_intel_dp *intel_dp = encoder->dev_priv; @@ -1299,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal /* Enable corresponding port and start training pattern 1 */ static void -cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) +cdv_intel_dp_start_link_train(struct gma_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct cdv_intel_dp *intel_dp = encoder->dev_priv; @@ -1393,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) } static void -cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) +cdv_intel_dp_complete_link_train(struct gma_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct cdv_intel_dp *intel_dp = encoder->dev_priv; @@ -1479,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) } static void -cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) +cdv_intel_dp_link_down(struct gma_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct cdv_intel_dp *intel_dp = encoder->dev_priv; @@ -1503,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) REG_READ(intel_dp->output_reg); } -static enum drm_connector_status -cdv_dp_detect(struct psb_intel_encoder *encoder) +static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder) { struct cdv_intel_dp *intel_dp = encoder->dev_priv; enum drm_connector_status status; @@ -1532,7 +1532,7 @@ cdv_dp_detect(struct 
psb_intel_encoder *encoder) static enum drm_connector_status cdv_intel_dp_detect(struct drm_connector *connector, bool force) { - struct psb_intel_encoder *encoder = gma_attached_encoder(connector); + struct gma_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; enum drm_connector_status status; struct edid *edid = NULL; @@ -1566,8 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force) static int cdv_intel_dp_get_modes(struct drm_connector *connector) { - struct psb_intel_encoder *intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *intel_encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; struct edid *edid = NULL; int ret = 0; @@ -1623,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector) static bool cdv_intel_dp_detect_audio(struct drm_connector *connector) { - struct psb_intel_encoder *encoder = gma_attached_encoder(connector); + struct gma_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; struct edid *edid; bool has_audio = false; @@ -1649,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector, uint64_t val) { struct drm_psb_private *dev_priv = connector->dev->dev_private; - struct psb_intel_encoder *encoder = gma_attached_encoder(connector); + struct gma_encoder *encoder = gma_attached_encoder(connector); struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; @@ -1702,11 +1701,10 @@ done: static void cdv_intel_dp_destroy(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv; - if (is_edp(psb_intel_encoder)) { + if (is_edp(gma_encoder)) { /* cdv_intel_panel_destroy_backlight(connector->dev); */ if (intel_dp->panel_fixed_mode) { kfree(intel_dp->panel_fixed_mode); @@ -1802,7 +1800,7 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev) void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; @@ -1810,8 +1808,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev const char *name = NULL; int type = DRM_MODE_CONNECTOR_DisplayPort; - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); if (!gma_connector) @@ -1824,21 +1822,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev type = DRM_MODE_CONNECTOR_eDP; connector = &gma_connector->base; - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, gma_encoder); if (type == DRM_MODE_CONNECTOR_DisplayPort) - psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + gma_encoder->type = 
INTEL_OUTPUT_DISPLAYPORT; else - psb_intel_encoder->type = INTEL_OUTPUT_EDP; + gma_encoder->type = INTEL_OUTPUT_EDP; - psb_intel_encoder->dev_priv=intel_dp; - intel_dp->encoder = psb_intel_encoder; + gma_encoder->dev_priv=intel_dp; + intel_dp->encoder = gma_encoder; intel_dp->output_reg = output_reg; drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); @@ -1854,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev switch (output_reg) { case DP_B: name = "DPDDC-B"; - psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); + gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT); break; case DP_C: name = "DPDDC-C"; - psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); + gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT); break; } cdv_disable_intel_clock_gating(dev); - cdv_intel_dp_i2c_init(gma_connector, psb_intel_encoder, name); + cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name); /* FIXME:fail check */ cdv_intel_dp_add_properties(connector); - if (is_edp(psb_intel_encoder)) { + if (is_edp(gma_encoder)) { int ret; struct edp_power_seq cur; u32 pp_on, pp_off, pp_div; @@ -1922,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - cdv_intel_edp_panel_vdd_on(psb_intel_encoder); - ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, + cdv_intel_edp_panel_vdd_on(gma_encoder); + ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV, intel_dp->dpcd, sizeof(intel_dp->dpcd)); - cdv_intel_edp_panel_vdd_off(psb_intel_encoder); + cdv_intel_edp_panel_vdd_off(gma_encoder); if (ret == 0) { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); @@ -1949,5 +1947,5 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev err_priv: kfree(gma_connector); err_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index b1290c303a58..1c0d723b8d24 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -64,8 +64,8 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); - struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); + struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv; u32 hdmib; struct drm_crtc *crtc = encoder->crtc; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); @@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder, static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; - struct psb_intel_encoder *psb_intel_encoder = - to_psb_intel_encoder(encoder); - struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); + struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv; u32 hdmib; hdmib = REG_READ(hdmi_priv->hdmi_reg); @@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) static void cdv_hdmi_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct 
mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv; hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); } @@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector) static void cdv_hdmi_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv; REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); REG_READ(hdmi_priv->hdmi_reg); @@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector) static enum drm_connector_status cdv_hdmi_detect( struct drm_connector *connector, bool force) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; - edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); + edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter); hdmi_priv->has_hdmi_sink = false; hdmi_priv->has_hdmi_audio = false; @@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector, */ static int cdv_hdmi_get_modes(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct edid *edid = NULL; int ret = 0; - edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); + edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter); if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); @@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector, static void cdv_hdmi_destroy(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if (psb_intel_encoder->i2c_bus) - psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); + if (gma_encoder->i2c_bus) + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -294,17 +288,16 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int reg) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct mid_intel_hdmi_priv *hdmi_priv; int ddc_bus; - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), - GFP_KERNEL); + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), @@ -320,7 +313,7 @@ void cdv_hdmi_init(struct drm_device *dev, connector = &gma_connector->base; connector->polled = DRM_CONNECTOR_POLL_HPD; - encoder = 
&psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, &cdv_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); @@ -328,11 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); - psb_intel_encoder->type = INTEL_OUTPUT_HDMI; + gma_connector_attach_encoder(gma_connector, gma_encoder); + gma_encoder->type = INTEL_OUTPUT_HDMI; hdmi_priv->hdmi_reg = reg; hdmi_priv->has_hdmi_sink = false; - psb_intel_encoder->dev_priv = hdmi_priv; + gma_encoder->dev_priv = hdmi_priv; drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs); drm_connector_helper_add(connector, @@ -348,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev, switch (reg) { case SDVOB: ddc_bus = GPIOE; - psb_intel_encoder->ddi_select = DDI0_SELECT; + gma_encoder->ddi_select = DDI0_SELECT; break; case SDVOC: ddc_bus = GPIOD; - psb_intel_encoder->ddi_select = DDI1_SELECT; + gma_encoder->ddi_select = DDI1_SELECT; break; default: DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); @@ -360,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev, break; } - psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, + gma_encoder->i2c_bus = psb_intel_i2c_create(dev, ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC"); - if (!psb_intel_encoder->i2c_bus) { + if (!gma_encoder->i2c_bus) { dev_err(dev->dev, "No ddc adapter available!\n"); goto failed_ddc; } - hdmi_priv->hdmi_i2c_adapter = - &(psb_intel_encoder->i2c_bus->adapter); + hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter); hdmi_priv->dev = dev; drm_sysfs_connector_add(connector); return; @@ -380,5 +372,5 @@ failed_ddc: err_priv: kfree(gma_connector); err_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index f4693ebfc098..20e08e65d46c 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -406,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; int ret; - ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter); + ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter); if (ret) return ret; @@ -443,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) */ static void cdv_intel_lvds_destroy(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if (psb_intel_encoder->i2c_bus) - psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); + if (gma_encoder->i2c_bus) + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -610,7 +608,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev, void cdv_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct cdv_intel_lvds_priv *lvds_priv; struct 
drm_connector *connector; @@ -628,9 +626,9 @@ void cdv_intel_lvds_init(struct drm_device *dev, return; } - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), @@ -642,10 +640,10 @@ void cdv_intel_lvds_init(struct drm_device *dev, if (!lvds_priv) goto failed_lvds_priv; - psb_intel_encoder->dev_priv = lvds_priv; + gma_encoder->dev_priv = lvds_priv; connector = &gma_connector->base; - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, @@ -657,8 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); - psb_intel_encoder->type = INTEL_OUTPUT_LVDS; + gma_connector_attach_encoder(gma_connector, gma_encoder); + gma_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); drm_connector_helper_add(connector, @@ -679,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev, * Set up I2C bus * FIXME: distroy i2c_bus when exit */ - psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, + gma_encoder->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B"); - if (!psb_intel_encoder->i2c_bus) { + if (!gma_encoder->i2c_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "I2C bus registration failed.\n"); goto failed_blc_i2c; } - psb_intel_encoder->i2c_bus->slave_addr = 0x2C; - dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus; + gma_encoder->i2c_bus->slave_addr = 0x2C; + dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus; /* * LVDS discovery: @@ -701,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev, */ /* Set up the DDC bus. */ - psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, + gma_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); - if (!psb_intel_encoder->ddc_bus) { + if (!gma_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; @@ -715,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, * preferred mode is the right one. 
*/ psb_intel_ddc_get_modes(connector, - &psb_intel_encoder->ddc_bus->adapter); + &gma_encoder->ddc_bus->adapter); list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = @@ -779,12 +777,12 @@ out: failed_find: printk(KERN_ERR "Failed find\n"); - if (psb_intel_encoder->ddc_bus) - psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); + if (gma_encoder->ddc_bus) + psb_intel_i2c_destroy(gma_encoder->ddc_bus); failed_ddc: printk(KERN_ERR "Failed DDC\n"); - if (psb_intel_encoder->i2c_bus) - psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); + if (gma_encoder->i2c_bus) + psb_intel_i2c_destroy(gma_encoder->i2c_bus); failed_blc_i2c: printk(KERN_ERR "Failed BLC\n"); drm_encoder_cleanup(encoder); @@ -793,5 +791,5 @@ failed_blc_i2c: failed_lvds_priv: kfree(gma_connector); failed_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); } diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index bdc63c6ec5b8..01dd7d225762 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -703,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct drm_encoder *encoder = &psb_intel_encoder->base; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct drm_encoder *encoder = &gma_encoder->base; int crtc_mask = 0, clone_mask = 0; /* valid crtcs */ - switch (psb_intel_encoder->type) { + switch (gma_encoder->type) { case INTEL_OUTPUT_ANALOG: crtc_mask = (1 << 0); clone_mask = (1 << INTEL_OUTPUT_ANALOG); diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index eb460759f5d3..cd253caf5672 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -37,9 +37,9 @@ bool gma_pipe_has_type(struct drm_crtc *crtc, int type) list_for_each_entry(l_entry, &mode_config->connector_list, head) { if (l_entry->encoder && l_entry->encoder->crtc == crtc) { - struct psb_intel_encoder *psb_intel_encoder = + struct gma_encoder *gma_encoder = gma_attached_encoder(l_entry); - if (psb_intel_encoder->type == type) + if (gma_encoder->type == type) return true; } } @@ -657,7 +657,7 @@ void gma_encoder_commit(struct drm_encoder *encoder) void gma_encoder_destroy(struct drm_encoder *encoder) { - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct gma_encoder *intel_encoder = to_gma_encoder(encoder); drm_encoder_cleanup(encoder); kfree(intel_encoder); @@ -666,14 +666,13 @@ void gma_encoder_destroy(struct drm_encoder *encoder) /* Currently there is only a 1:1 mapping of encoders and connectors */ struct drm_encoder *gma_best_encoder(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - return &psb_intel_encoder->base; + return &gma_encoder->base; } void gma_connector_attach_encoder(struct gma_connector *connector, - struct psb_intel_encoder *encoder) + struct gma_encoder *encoder) { connector->encoder = encoder; drm_mode_connector_attach_encoder(&connector->base, diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h index d78b108e3f3a..45d5af0546bf 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h +++ 
b/drivers/gpu/drm/gma500/mdfld_dsi_output.h @@ -238,7 +238,7 @@ struct mdfld_dsi_connector { }; struct mdfld_dsi_encoder { - struct psb_intel_encoder base; + struct gma_encoder base; void *private; }; @@ -279,11 +279,11 @@ static inline struct mdfld_dsi_connector *mdfld_dsi_connector( static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( struct drm_encoder *encoder) { - struct psb_intel_encoder *psb_encoder; + struct gma_encoder *gma_encoder; - psb_encoder = to_psb_intel_encoder(encoder); + gma_encoder = to_gma_encoder(encoder); - return container_of(psb_encoder, struct mdfld_dsi_encoder, base); + return container_of(gma_encoder, struct mdfld_dsi_encoder, base); } static inline struct mdfld_dsi_config * diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index adb29e370ce5..321c00a944e9 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -681,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, u32 dpll = 0, fp = 0; bool is_mipi = false, is_mipi2 = false, is_hdmi = false; struct drm_mode_config *mode_config = &dev->mode_config; - struct psb_intel_encoder *psb_intel_encoder = NULL; + struct gma_encoder *gma_encoder = NULL; uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; struct drm_encoder *encoder; struct drm_connector *connector; @@ -747,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; - psb_intel_encoder = gma_attached_encoder(connector); + gma_encoder = gma_attached_encoder(connector); - switch (psb_intel_encoder->type) { + switch (gma_encoder->type) { case INTEL_OUTPUT_MIPI: is_mipi = true; break; @@ -800,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->pos, 0); - if (psb_intel_encoder) + if (gma_encoder) drm_object_property_get_value(&connector->base, dev->mode_config.scaling_mode_property, &scalingType); diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 284e62ae7b57..54c98962b73e 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, bool is_lvds = false; bool is_mipi = false; struct drm_mode_config *mode_config = &dev->mode_config; - struct psb_intel_encoder *psb_intel_encoder = NULL; + struct gma_encoder *gma_encoder = NULL; uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; struct drm_connector *connector; @@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, if (!connector->encoder || connector->encoder->crtc != crtc) continue; - psb_intel_encoder = gma_attached_encoder(connector); + gma_encoder = gma_attached_encoder(connector); - switch (psb_intel_encoder->type) { + switch (gma_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; @@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); - if (psb_intel_encoder) + if (gma_encoder) drm_object_property_get_value(&connector->base, dev->mode_config.scaling_mode_property, &scalingType); diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 846817fe95ce..38153143ed8c 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -640,13 +640,13 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = { void oaktrail_hdmi_init(struct drm_device *dev, 
struct psb_intel_mode_device *mode_dev) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); @@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, goto failed_connector; connector = &gma_connector->base; - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, &oaktrail_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); @@ -663,9 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev, &oaktrail_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); + gma_connector_attach_encoder(gma_connector, gma_encoder); - psb_intel_encoder->type = INTEL_OUTPUT_HDMI; + gma_encoder->type = INTEL_OUTPUT_HDMI; drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs); @@ -678,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, return; failed_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); } static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 9d037720eb9a..e77d7214fca4 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -43,7 +43,7 @@ * Sets the power state for the panel. */ static void oaktrail_lvds_set_power(struct drm_device *dev, - struct psb_intel_encoder *psb_intel_encoder, + struct gma_encoder *gma_encoder, bool on) { u32 pp_status; @@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev, static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; - struct psb_intel_encoder *psb_intel_encoder = - to_psb_intel_encoder(encoder); + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); if (mode == DRM_MODE_DPMS_ON) - oaktrail_lvds_set_power(dev, psb_intel_encoder, true); + oaktrail_lvds_set_power(dev, gma_encoder, true); else - oaktrail_lvds_set_power(dev, psb_intel_encoder, false); + oaktrail_lvds_set_power(dev, gma_encoder, false); /* XXX: We never power down the LVDS pairs. 
*/ } @@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder = - to_psb_intel_encoder(encoder); + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (!gma_power_begin(dev, true)) @@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder) mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); - oaktrail_lvds_set_power(dev, psb_intel_encoder, false); + oaktrail_lvds_set_power(dev, gma_encoder, false); gma_power_end(dev); } @@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder = - to_psb_intel_encoder(encoder); + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (mode_dev->backlight_duty_cycle == 0) mode_dev->backlight_duty_cycle = oaktrail_lvds_get_max_backlight(dev); - oaktrail_lvds_set_power(dev, psb_intel_encoder, true); + oaktrail_lvds_set_power(dev, gma_encoder, true); } static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = { @@ -325,7 +322,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev, void oaktrail_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct drm_connector *connector; struct drm_encoder *encoder; @@ -334,8 +331,8 @@ void oaktrail_lvds_init(struct drm_device *dev, struct i2c_adapter *i2c_adap; struct drm_display_mode *scan; /* *modes, *bios_mode; */ - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); + if (!gma_encoder) return; gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL); @@ -343,7 +340,7 @@ void oaktrail_lvds_init(struct drm_device *dev, goto failed_connector; connector = &gma_connector->base; - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; dev_priv->is_lvds_on = true; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, @@ -352,8 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); - psb_intel_encoder->type = INTEL_OUTPUT_LVDS; + gma_connector_attach_encoder(gma_connector, gma_encoder); + gma_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); drm_connector_helper_add(connector, @@ -433,8 +430,8 @@ out: failed_find: dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); - if (psb_intel_encoder->ddc_bus) - psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); + if (gma_encoder->ddc_bus) + psb_intel_i2c_destroy(gma_encoder->ddc_bus); /* failed_ddc: */ @@ -442,6 +439,6 @@ failed_find: drm_connector_cleanup(connector); kfree(gma_connector); failed_connector: - kfree(psb_intel_encoder); + kfree(gma_encoder); } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 6c2d139aab8b..da6a88e67b01 100644 --- 
a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) unsigned long irqflags; int ret = -ENOMEM; struct drm_connector *connector; - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (dev_priv == NULL) @@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) /* Only add backlight support if we have LVDS output */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - psb_intel_encoder = gma_attached_encoder(connector); + gma_encoder = gma_attached_encoder(connector); - switch (psb_intel_encoder->type) { + switch (gma_encoder->type) { case INTEL_OUTPUT_LVDS: case INTEL_OUTPUT_MIPI: ret = gma_backlight_init(dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 6cee07013a2d..97f8a03fee43 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -126,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, } list_for_each_entry(connector, &mode_config->connector_list, head) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); if (!connector->encoder || connector->encoder->crtc != crtc) continue; - switch (psb_intel_encoder->type) { + switch (gma_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; @@ -602,9 +601,8 @@ int gma_connector_clones(struct drm_device *dev, int type_mask) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - if (type_mask & (1 << psb_intel_encoder->type)) + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + if (type_mask & (1 << gma_encoder->type)) index_mask |= (1 << entry); entry++; } diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 9a34e85c7421..bde27fdb41bf 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -117,11 +117,11 @@ struct psb_intel_i2c_chan { u8 slave_addr; }; -struct psb_intel_encoder { +struct gma_encoder { struct drm_encoder base; int type; bool needs_tv_clock; - void (*hot_plug)(struct psb_intel_encoder *); + void (*hot_plug)(struct gma_encoder *); int crtc_mask; int clone_mask; u32 ddi_select; /* Channel info */ @@ -139,7 +139,7 @@ struct psb_intel_encoder { struct gma_connector { struct drm_connector base; - struct psb_intel_encoder *encoder; + struct gma_encoder *encoder; }; struct psb_intel_crtc_state { @@ -197,8 +197,8 @@ struct gma_crtc { container_of(x, struct gma_crtc, base) #define to_gma_connector(x) \ container_of(x, struct gma_connector, base) -#define to_psb_intel_encoder(x) \ - container_of(x, struct psb_intel_encoder, base) +#define to_gma_encoder(x) \ + container_of(x, struct gma_encoder, base) #define to_psb_intel_framebuffer(x) \ container_of(x, struct psb_intel_framebuffer, base) @@ -228,9 +228,9 @@ extern void mid_dsi_init(struct drm_device *dev, extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector); extern void gma_connector_attach_encoder(struct gma_connector *connector, - struct psb_intel_encoder *encoder); + struct gma_encoder *encoder); -static inline struct psb_intel_encoder *gma_attached_encoder( +static 
inline struct gma_encoder *gma_attached_encoder( struct drm_connector *connector) { return to_gma_connector(connector)->encoder; diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index c08627554b50..32342f6990d9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = - (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; + (struct psb_intel_lvds_priv *)gma_encoder->dev_priv; lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); @@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; u32 pp_status; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = - (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; + (struct psb_intel_lvds_priv *)gma_encoder->dev_priv; dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON, @@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct drm_display_mode *fixed_mode = dev_priv->mode_dev.panel_fixed_mode; - if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) + if (gma_encoder->type == INTEL_OUTPUT_MIPI2) fixed_mode = dev_priv->mode_dev.panel_fixed_mode2; /* just in case */ @@ -384,10 +381,9 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; - struct psb_intel_encoder *psb_intel_encoder = - to_psb_intel_encoder(encoder); + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); - if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) + if (gma_encoder->type == INTEL_OUTPUT_MIPI2) panel_fixed_mode = mode_dev->panel_fixed_mode2; /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ @@ -524,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv; int ret = 0; if (!IS_MRST(dev)) @@ -563,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) */ void psb_intel_lvds_destroy(struct drm_connector *connector) { - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; 
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv; if (lvds_priv->ddc_bus) psb_intel_i2c_destroy(lvds_priv->ddc_bus); @@ -689,7 +683,7 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { void psb_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct gma_connector *gma_connector; struct psb_intel_lvds_priv *lvds_priv; struct drm_connector *connector; @@ -700,10 +694,9 @@ void psb_intel_lvds_init(struct drm_device *dev, u32 lvds; int pipe; - psb_intel_encoder = - kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) { - dev_err(dev->dev, "psb_intel_encoder allocation error\n"); + gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); + if (!gma_encoder) { + dev_err(dev->dev, "gma_encoder allocation error\n"); return; } @@ -719,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev, goto failed_connector; } - psb_intel_encoder->dev_priv = lvds_priv; + gma_encoder->dev_priv = lvds_priv; connector = &gma_connector->base; - encoder = &psb_intel_encoder->base; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -731,8 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - gma_connector_attach_encoder(gma_connector, psb_intel_encoder); - psb_intel_encoder->type = INTEL_OUTPUT_LVDS; + gma_connector_attach_encoder(gma_connector, gma_encoder); + gma_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); drm_connector_helper_add(connector, @@ -849,6 +842,6 @@ failed_blc_i2c: failed_connector: kfree(gma_connector); failed_encoder: - kfree(psb_intel_encoder); + kfree(gma_encoder); } diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 7164c3c8f492..77841a113617 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -65,7 +65,7 @@ static const char *tv_format_names[] = { #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) struct psb_intel_sdvo { - struct psb_intel_encoder base; + struct gma_encoder base; struct i2c_adapter *i2c; u8 slave_addr; @@ -1836,10 +1836,8 @@ done: static void psb_intel_sdvo_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct psb_intel_encoder *psb_intel_encoder = - gma_attached_encoder(connector); - struct psb_intel_sdvo *sdvo = - to_psb_intel_sdvo(&psb_intel_encoder->base); + struct gma_encoder *gma_encoder = gma_attached_encoder(connector); + struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base); sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg); } @@ -2539,7 +2537,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo, bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) { struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_encoder *psb_intel_encoder; + struct gma_encoder *gma_encoder; struct psb_intel_sdvo *psb_intel_sdvo; int i; @@ -2556,9 +2554,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) } /* encoder type will be decided later */ - psb_intel_encoder = &psb_intel_sdvo->base; - psb_intel_encoder->type = INTEL_OUTPUT_SDVO; - drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0); + gma_encoder = 
&psb_intel_sdvo->base; + gma_encoder->type = INTEL_OUTPUT_SDVO; + drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { @@ -2576,7 +2574,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; - drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs); + drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps)) @@ -2619,7 +2617,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) return true; err: - drm_encoder_cleanup(&psb_intel_encoder->base); + drm_encoder_cleanup(&gma_encoder->base); i2c_del_adapter(&psb_intel_sdvo->ddc); kfree(psb_intel_sdvo); -- cgit v1.2.3 From 164c8598450657d01fa75d6c997e95eb35672eef Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 20 Jul 2013 20:27:08 +0100 Subject: drm/i915: Add some debug breadcrumbs to connector detection Try to decypher detection failures is a little tricker at the moment as the only indicator of progress is when output_poll_execute() tells us the result after the connector->detect() has run. This patch adds a telltale to the start of each detect function so that we can track progress and associate activity more clearly with each connector. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 4 ++++ drivers/gpu/drm/i915/intel_dp.c | 3 +++ drivers/gpu/drm/i915/intel_dvo.c | 2 ++ drivers/gpu/drm/i915/intel_hdmi.c | 3 +++ drivers/gpu/drm/i915/intel_lvds.c | 3 +++ drivers/gpu/drm/i915/intel_sdvo.c | 3 +++ drivers/gpu/drm/i915/intel_tv.c | 4 ++++ 7 files changed, 22 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3acec8c48166..0c0d4e8d768e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -613,6 +613,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; struct intel_load_detect_pipe tmp; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, drm_get_connector_name(connector), + force); + if (I915_HAS_HOTPLUG(dev)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d391ce38ec25..1761877b72ee 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2815,6 +2815,9 @@ intel_dp_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; struct edid *edid = NULL; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index cbbc49dc03be..8b4ad27791f3 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -332,6 +332,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c 
b/drivers/gpu/drm/i915/intel_hdmi.c index 98df2a0c85bd..af18da76c04b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -866,6 +866,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct edid *edid; enum drm_connector_status status = connector_status_disconnected; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; intel_hdmi->rgb_quant_range_selectable = false; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b0e1088b2c97..25ea40696144 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -339,6 +339,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force) struct drm_device *dev = connector->dev; enum drm_connector_status status; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + status = intel_panel_detect(dev); if (status != connector_status_unknown) return status; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 798df114cfd3..c3b59b8593b9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1696,6 +1696,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, &response, 2)) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 39debd80d190..b0b446f630f7 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1305,6 +1305,10 @@ intel_tv_detect(struct drm_connector *connector, bool force) struct intel_tv *intel_tv = intel_attached_tv(connector); int type; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, drm_get_connector_name(connector), + force); + mode = reported_modes[0]; if (force) { -- cgit v1.2.3 From bc6bc15bd7d6bbe3dd2da65d1a81a6dec5d0fa94 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 22 Jul 2013 12:12:38 +0200 Subject: drm/i915: fix up error cleanup in i915_gem_object_bind_to_gtt This has been broken in commit 2f63315692b1d3c055972ad33fc7168ae908b97b Author: Ben Widawsky Date: Wed Jul 17 12:19:03 2013 -0700 drm/i915: Create VMAs which resulted in an OOPS the first time around we've hit -ENOSPC. 
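The shape of the fix is the usual reverse-order goto unwind: each failure
point jumps to the label that releases exactly what has already been
acquired, so nothing is leaked and nothing is released twice. A minimal,
self-contained sketch of that pattern, using made-up stand-in names rather
than the real i915 symbols:

    #include <stdio.h>

    static int pin_pages(void)    { return 0; }   /* step 1 */
    static int create_vma(void)   { return 0; }   /* step 2 */
    static int insert_node(void)  { return -1; }  /* step 3, pretend it fails */
    static void destroy_vma(void) { puts("destroy vma"); }
    static void unpin_pages(void) { puts("unpin pages"); }

    static int bind_example(void)
    {
        int ret;

        ret = pin_pages();
        if (ret)
            return ret;        /* nothing to unwind yet */

        ret = create_vma();
        if (ret)
            goto err_unpin;

        ret = insert_node();
        if (ret)
            goto err_free_vma;

        return 0;

    err_free_vma:
        destroy_vma();
    err_unpin:
        unpin_pages();
        return ret;
    }

    int main(void) { return bind_example() ? 1 : 0; }

The real function in the diff below does the same thing, with one label per
acquired resource (err_remove_node, err_free_vma, err_unpin) instead of
returning early.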
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67156 Cc: Imre Deak Cc: Ben Widawsky Tested-by: meng Reviewed-by: Chris Wilson Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index acc99d458143..c4653df5799b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3137,8 +3137,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); if (IS_ERR(vma)) { - i915_gem_object_unpin_pages(obj); - return PTR_ERR(vma); + ret = PTR_ERR(vma); + goto err_unpin; } search_free: @@ -3154,17 +3154,17 @@ search_free: if (ret == 0) goto search_free; - goto err_out; + goto err_free_vma; } if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, obj->cache_level))) { ret = -EINVAL; - goto err_out; + goto err_remove_node; } ret = i915_gem_gtt_prepare_object(obj); if (ret) - goto err_out; + goto err_remove_node; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &vm->inactive_list); @@ -3183,9 +3183,11 @@ search_free: i915_gem_verify_gtt(dev); return 0; -err_out: +err_remove_node: drm_mm_remove_node(&vma->node); +err_free_vma: i915_gem_vma_destroy(vma); +err_unpin: i915_gem_object_unpin_pages(obj); return ret; } -- cgit v1.2.3 From 2fa86a1fea14c3019b2de16ea47e1a5363c60905 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 23 Jul 2013 11:19:24 -0300 Subject: drm/i915: extend lpt_enable_clkout_dp Now it implements 3 different sequences from BSpec and also has support for ULT. v2: - Change IS_ULT checks for LPT-LP checks - Add check for LPT-LP + with_fdi (Ben) - Merge DBUFF0/GEN0 bit definitions since they're the same register (Ben) - DBUFF0 (1<<0) is Disable, not Enable Reviewed-by: Ben Widawsky Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 3 ++- drivers/gpu/drm/i915/intel_display.c | 43 +++++++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e20f0937b3ec..0dfcbad4eb7b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4942,7 +4942,8 @@ #define SBI_SSCAUXDIV6 0x0610 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) #define SBI_DBUFF0 0x2a00 -#define SBI_DBUFF0_ENABLE (1<<0) +#define SBI_GEN0 0x1f00 +#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) /* LPT PIXCLK_GATE */ #define PIXCLK_GATE 0xC6020 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a8fc924e7788..22e3f341366c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5263,11 +5263,23 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); } -/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. 
*/ -static void lpt_enable_clkout_dp(struct drm_device *dev) +/* Implements 3 different sequences from BSpec chapter "Display iCLK + * Programming" based on the parameters passed: + * - Sequence to enable CLKOUT_DP + * - Sequence to enable CLKOUT_DP without spread + * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O + */ +static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, + bool with_fdi) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t tmp; + uint32_t reg, tmp; + + if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) + with_spread = true; + if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && + with_fdi, "LP PCH doesn't have FDI\n")) + with_fdi = false; mutex_lock(&dev_priv->dpio_lock); @@ -5278,17 +5290,22 @@ static void lpt_enable_clkout_dp(struct drm_device *dev) udelay(24); - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + if (with_spread) { + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - lpt_reset_fdi_mphy(dev_priv); - lpt_program_fdi_mphy(dev_priv); + if (with_fdi) { + lpt_reset_fdi_mphy(dev_priv); + lpt_program_fdi_mphy(dev_priv); + } + } - /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ - tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); - tmp |= SBI_DBUFF0_ENABLE; - intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); + reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? + SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); mutex_unlock(&dev_priv->dpio_lock); } @@ -5310,7 +5327,7 @@ static void lpt_init_pch_refclk(struct drm_device *dev) if (!has_vga) return; - lpt_enable_clkout_dp(dev); + lpt_enable_clkout_dp(dev, true, true); } /* -- cgit v1.2.3 From 47701c3ba26cb33ebe8a5e899ec922ab0de621a3 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 23 Jul 2013 11:19:25 -0300 Subject: drm/i915: disable CLKOUT_DP when it's not needed We currently don't support HDMI clock bending nor use SSC for DP or HDMI on Haswell, so the only case where we need CLKOUT_DP is for VGA. v2: - Replace the IS_ULT check for LPT-LP - Simplify GEN0/DBUFF0 check due to change on the previous patch - Also check for SBI_SSCCTL_DISABLE (Ben). Reviewed-by: Ben Widawsky Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 22e3f341366c..26b49d8d781c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5310,6 +5310,34 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, mutex_unlock(&dev_priv->dpio_lock); } +/* Sequence to disable CLKOUT_DP */ +static void lpt_disable_clkout_dp(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t reg, tmp; + + mutex_lock(&dev_priv->dpio_lock); + + reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 
+ SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + if (!(tmp & SBI_SSCCTL_DISABLE)) { + if (!(tmp & SBI_SSCCTL_PATHALT)) { + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + udelay(32); + } + tmp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + } + + mutex_unlock(&dev_priv->dpio_lock); +} + static void lpt_init_pch_refclk(struct drm_device *dev) { struct drm_mode_config *mode_config = &dev->mode_config; @@ -5324,10 +5352,10 @@ static void lpt_init_pch_refclk(struct drm_device *dev) } } - if (!has_vga) - return; - - lpt_enable_clkout_dp(dev, true, true); + if (has_vga) + lpt_enable_clkout_dp(dev, true, true); + else + lpt_disable_clkout_dp(dev); } /* -- cgit v1.2.3 From be256dc70284c028d0dd828b18b8f804e310507b Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 23 Jul 2013 11:19:26 -0300 Subject: drm/i915: add functions to disable and restore LCPLL For now there are no callers, but these functions are going to be needed for the code that allows Package C8+. Other future features may also require this code. Also merge the commit which introduced assert_can_disable_lcpll and had the following commit message: Most of the hardware needs to be disabled before LCPLL is disabled, so let's add a function to assert some of items listed in the "Display Sequences for LCPLL disabling" documentation. The idea is that hsw_disable_lcpll should not disable the hardware, the callers need to take care of calling hsw_disable_lcpll only once everything is already disabled. v2: - Rebase. - Fix D_COMP wait timeout. v3: - Use wait_for_atomic_use (Ben) - Remove/add a useless/needed POSTING_READ (Ben) - Early return in case LCPLL is already restored (Ben) - Add ndelay(100) (Ben) v4: - Merge the commit that added assert_can_disable_lcpll (Ben) - Add interrupt assertions (Ben) Reviewed-by: Ben Widawsky Signed-off-by: Paulo Zanoni [danvet: Fix compile fail since there's no HAS_LP_PCH yet.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 15 ++++ drivers/gpu/drm/i915/intel_display.c | 136 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 3 + 3 files changed, 154 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0dfcbad4eb7b..6caa748fa00f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2261,6 +2261,8 @@ #define BLC_PWM_CPU_CTL2 0x48250 #define BLC_PWM_CPU_CTL 0x48254 +#define HSW_BLC_PWM2_CTL 0x48350 + /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is * like the normal CTL from gen4 and earlier. Hooray for confusing naming. 
*/ #define BLC_PWM_PCH_CTL1 0xc8250 @@ -2269,6 +2271,12 @@ #define BLM_PCH_POLARITY (1 << 29) #define BLC_PWM_PCH_CTL2 0xc8254 +#define UTIL_PIN_CTL 0x48400 +#define UTIL_PIN_ENABLE (1 << 31) + +#define PCH_GTC_CTL 0xe7000 +#define PCH_GTC_ENABLE (1 << 31) + /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ @@ -5009,7 +5017,14 @@ #define LCPLL_CLK_FREQ_450 (0<<26) #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) +#define LCPLL_POWER_DOWN_ALLOW (1<<22) #define LCPLL_CD_SOURCE_FCLK (1<<21) +#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) + +#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +#define D_COMP_RCOMP_IN_PROGRESS (1<<9) +#define D_COMP_COMP_FORCE (1<<8) +#define D_COMP_COMP_DISABLE (1<<0) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 0x45270 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 26b49d8d781c..32024dadccee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5925,6 +5925,142 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, return true; } +static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_ddi_plls *plls = &dev_priv->ddi_plls; + struct intel_crtc *crtc; + unsigned long irqflags; + uint32_t val, pch_hpd_mask; + + pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT; + if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)) + pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) + WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", + pipe_name(crtc->pipe)); + + WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); + WARN(plls->spll_refcount, "SPLL enabled\n"); + WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); + WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); + WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); + WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + "CPU PWM1 enabled\n"); + WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); + WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + "PCH PWM1 enabled\n"); + WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Utility pin enabled\n"); + WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + val = I915_READ(DEIMR); + WARN((val & ~DE_PCH_EVENT_IVB) != val, + "Unexpected DEIMR bits enabled: 0x%x\n", val); + val = I915_READ(SDEIMR); + WARN((val & ~pch_hpd_mask) != val, + "Unexpected SDEIMR bits enabled: 0x%x\n", val); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +/* + * This function implements pieces of two sequences from BSpec: + * - Sequence for display software to disable LCPLL + * - Sequence for display software to allow package C8+ + * The steps implemented here are just the steps that actually touch the LCPLL + * register. Callers should take care of disabling all the display engine + * functions, doing the mode unset, fixing interrupts, etc. 
+ */ +void hsw_disable_lcpll(struct drm_i915_private *dev_priv, + bool switch_to_fclk, bool allow_power_down) +{ + uint32_t val; + + assert_can_disable_lcpll(dev_priv); + + val = I915_READ(LCPLL_CTL); + + if (switch_to_fclk) { + val |= LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) + DRM_ERROR("Switching to FCLK failed\n"); + + val = I915_READ(LCPLL_CTL); + } + + val |= LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + + if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) + DRM_ERROR("LCPLL still locked\n"); + + val = I915_READ(D_COMP); + val |= D_COMP_COMP_DISABLE; + I915_WRITE(D_COMP, val); + POSTING_READ(D_COMP); + ndelay(100); + + if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + DRM_ERROR("D_COMP RCOMP still in progress\n"); + + if (allow_power_down) { + val = I915_READ(LCPLL_CTL); + val |= LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + } +} + +/* + * Fully restores LCPLL, disallowing power down and switching back to LCPLL + * source. + */ +void hsw_restore_lcpll(struct drm_i915_private *dev_priv) +{ + uint32_t val; + + val = I915_READ(LCPLL_CTL); + + if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | + LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) + return; + + if (val & LCPLL_POWER_DOWN_ALLOW) { + val &= ~LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + } + + val = I915_READ(D_COMP); + val |= D_COMP_COMP_FORCE; + val &= ~D_COMP_COMP_DISABLE; + I915_WRITE(D_COMP, val); + I915_READ(D_COMP); + + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) + DRM_ERROR("LCPLL not locked yet\n"); + + if (val & LCPLL_CD_SOURCE_FCLK) { + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + DRM_ERROR("Switching back to LCPLL failed\n"); + } +} + static void haswell_modeset_global_resources(struct drm_device *dev) { bool enable = false; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40e955d00b2b..253fc1e9dbe2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -838,5 +838,8 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, extern void intel_edp_psr_enable(struct intel_dp *intel_dp); extern void intel_edp_psr_disable(struct intel_dp *intel_dp); extern void intel_edp_psr_update(struct drm_device *dev); +extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv, + bool switch_to_fclk, bool allow_power_down); +extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); #endif /* __INTEL_DRV_H__ */ -- cgit v1.2.3 From f63a484c2f606b8267eb4d1dbfce5d1d3416e0bb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 23 Jul 2013 19:24:38 +0200 Subject: drm/i915: disable stolen mem for OVERLAY_NEEDS_PHYSICAL Our phys_object code can't deal with stolen memory and so blows up. Fixing this is quite a bit of work and not worth it much for a single page object, so just opt-out. This is necessary prep work to enable stolen on gen2/3 platforms where the overlay register file isn't stored in the gtt. 
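The opt-out itself is just a preference order between two allocators: try
stolen memory only when the object never needs the phys-object treatment,
and quietly fall back to the normal allocator otherwise. A rough standalone
sketch of that shape, with invented names rather than the actual
overlay/GEM calls:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf { const char *backing; };

    /* stand-ins for the two allocators */
    static struct buf *alloc_stolen(void) { return NULL; } /* pretend stolen failed */
    static struct buf *alloc_shmem(void)
    {
        struct buf *b = malloc(sizeof(*b));
        if (b)
            b->backing = "shmem";
        return b;
    }

    static struct buf *alloc_reg_buffer(bool needs_physical)
    {
        struct buf *b = NULL;

        /* phys-object paths cannot cope with stolen memory, so skip it */
        if (!needs_physical)
            b = alloc_stolen();
        if (!b)
            b = alloc_shmem();  /* fall back to the regular allocator */
        return b;
    }

    int main(void)
    {
        struct buf *b = alloc_reg_buffer(true);
        printf("backed by %s\n", b ? b->backing : "nothing");
        free(b);
        return 0;
    }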
Cc: Chris Wilson Acked-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 2abb53e6f1e0..9ec5a4e12af2 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev) overlay->dev = dev; - reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); + reg_bo = NULL; + if (!OVERLAY_NEEDS_PHYSICAL(dev)) + reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); if (reg_bo == NULL) reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (reg_bo == NULL) -- cgit v1.2.3 From 17fec8a08698bcab98788e1e89f5b8e7502ababd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2013 00:23:33 +0100 Subject: drm/i915: Use Graphics Base of Stolen Memory on all gen3+ So I made the mistake of missing that the desktop and mobile chipsets have different layouts in their PCI configurations, and we were incorrectly setting the wrong physical address for stolen memory on mobile chipsets. Since all gen3+ are actually consistent in the location of the GBSM register in the PCI configuration space on device 2 (the GPU), use it. Signed-off-by: Chris Wilson Cc: Daniel Vetter [danvet: Drop cc: stable and fudge conflicts.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 42 ++++++++++------------------------ 1 file changed, 12 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 9a1896c86dcd..837cf534f47a 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -45,45 +45,27 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev_priv->bridge_dev; struct resource *r; u32 base; - /* On the machines I have tested the Graphics Base of Stolen Memory - * is unreliable, so on those compute the base by subtracting the - * stolen memory from the Top of Low Usable DRAM which is where the - * BIOS places the graphics stolen memory. + /* Almost universally we can find the Graphics Base of Stolen Memory + * at offset 0x5c in the igfx configuration space. On a few (desktop) + * machines this is also mirrored in the bridge device at different + * locations, or in the MCHBAR. On gen2, the layout is again slightly + * different with the Graphics Segment immediately following Top of + * Memory (or Top of Usable DRAM). Note it appears that TOUD is only + * reported by 865g, so we just use the top of memory as determined + * by the e820 probe. * - * On gen2, the layout is slightly different with the Graphics Segment - * immediately following Top of Memory (or Top of Usable DRAM). Note - * it appears that TOUD is only reported by 865g, so we just use the - * top of memory as determined by the e820 probe. - * - * XXX gen2 requires an unavailable symbol and 945gm fails with - * its value of TOLUD. + * XXX However gen2 requires an unavailable symbol. */ base = 0; - if (IS_VALLEYVIEW(dev)) { + if (INTEL_INFO(dev)->gen >= 3) { + /* Read Graphics Base of Stolen Memory directly */ pci_read_config_dword(dev->pdev, 0x5c, &base); base &= ~((1<<20) - 1); - } else if (INTEL_INFO(dev)->gen >= 6) { - /* Read Base Data of Stolen Memory Register (BDSM) directly. 
- * Note that there is also a MCHBAR miror at 0x1080c0 or - * we could use device 2:0x5c instead. - */ - pci_read_config_dword(pdev, 0xB0, &base); - base &= ~4095; /* lower bits used for locking register */ - } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { - /* Read Graphics Base of Stolen Memory directly */ - pci_read_config_dword(pdev, 0xA4, &base); + } else { /* GEN2 */ #if 0 - } else if (IS_GEN3(dev)) { - u8 val; - /* Stolen is immediately below Top of Low Usable DRAM */ - pci_read_config_byte(pdev, 0x9c, &val); - base = val >> 3 << 27; - base -= dev_priv->mm.gtt->stolen_size; - } else { /* Stolen is immediately above Top of Memory */ base = max_low_pfn_mapped << PAGE_SHIFT; #endif -- cgit v1.2.3 From d861e3387650296f1fca2a4dd0dcd380c8fdddad Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 Jul 2013 23:25:03 +0200 Subject: drm/i915: fix reference counting in i915_gem_create This function is called without the dev->struct_mutex held, hence we need to use the _unlocked unreference variants. As soon as the object is registered userspace can sneak in here with a gem_close ioctl call, so the object can (and with my new evil tests actually does) get the final unreference in this place. The lack of locking then results in hilarity and some good leakage. To fix this we simply need to revert Chris Wilson v2: We need to make the trace call _before_ we drop our ref - the object might very well be gone by then already. v3: Just revert the original patch as suggested by Chris Wilson. Cc: Chris Wilson Reviewed-by: Chris Wilson [danvet: Remove the added white line again to tighten the return block, requested by Chris.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c4653df5799b..0a1ddb8a28a7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -219,16 +219,10 @@ i915_gem_create(struct drm_file *file, return -ENOMEM; ret = drm_gem_handle_create(file, &obj->base, &handle); - if (ret) { - drm_gem_object_release(&obj->base); - i915_gem_info_remove_obj(dev->dev_private, obj->base.size); - i915_gem_object_free(obj); - return ret; - } - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference(&obj->base); - trace_i915_gem_object_create(obj); + drm_gem_object_unreference_unlocked(&obj->base); + if (ret) + return ret; *handle_p = handle; return 0; @@ -3956,6 +3950,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, } else obj->cache_level = I915_CACHE_NONE; + trace_i915_gem_object_create(obj); + return obj; } -- cgit v1.2.3 From fe3078fa5c367186c94a6652581ffbe9ccea4640 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 24 Jul 2013 21:06:15 +0200 Subject: drm: add unified vma offset manager If we want to map GPU memory into user-space, we need to linearize the addresses to not confuse mm-core. Currently, GEM and TTM both implement their own offset-managers to assign a pgoff to each object for user-space CPU access. GEM uses a hash-table, TTM uses an rbtree. This patch provides a unified implementation that can be used to replace both. TTM allows partial mmaps with a given offset, so we cannot use hashtables as the start address may not be known at mmap time. Hence, we use the rbtree-implementation of TTM. 
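A minimal, driver-side usage sketch of the new interface may help here. The drm_vma_* helpers are exactly the ones added by this patch; "my_bo" and its fields are made-up placeholders for whatever object a driver tracks:

	struct my_bo {
		struct drm_vma_offset_node vma_node;
		unsigned long num_pages;
	};

	int my_bo_mmap_offset(struct drm_vma_offset_manager *mgr,
			      struct my_bo *bo, __u64 *offset)
	{
		int ret;

		/* all sizes/starts handed to the manager are in pages */
		ret = drm_vma_offset_add(mgr, &bo->vma_node, bo->num_pages);
		if (ret)
			return ret;

		/* byte-based fake offset to hand to user-space for mmap() */
		*offset = drm_vma_node_offset_addr(&bo->vma_node);
		return 0;
	}

	/* on mmap(), resolve vm_pgoff back to an object: */
	node = drm_vma_offset_lookup(mgr, vma->vm_pgoff, vma_pages(vma));
	bo = node ? container_of(node, struct my_bo, vma_node) : NULL;

	/* on object destruction: */
	drm_vma_offset_remove(mgr, &bo->vma_node);

The manager itself is set up once per device with drm_vma_offset_manager_init() and torn down with drm_vma_offset_manager_destroy().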
We could easily update drm_mm to use an rbtree instead of a linked list for it's object list and thus drop the rbtree from the vma-manager. However, this would slow down drm_mm object allocation for all other use-cases (rbtree insertion) and add another 4-8 bytes to each mm node. Hence, use the separate tree but allow for later migration. This is a rewrite of the 2012-proposal by David Airlie v2: - fix Docbook integration - drop drm_mm_node_linked() and use drm_mm_node_allocated() - remove unjustified likely/unlikely usage (but keep for rbtree paths) - remove BUG_ON() as drm_mm already does that - clarify page-based vs. byte-based addresses - use drm_vma_node_reset() for initialization, too v4: - allow external locking via drm_vma_offset_un/lock_lookup() - add locked lookup helper drm_vma_offset_lookup_locked() v5: - fix drm_vma_offset_lookup() to correctly validate range-mismatches (fix (offset > start + pages)) - fix drm_vma_offset_exact_lookup() to actually do what it says - remove redundant vm_pages member (add drm_vma_node_size() helper) - remove unneeded goto - fix documentation Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 6 + drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_vma_manager.c | 281 ++++++++++++++++++++++++++++++++++++++ include/drm/drm_vma_manager.h | 202 +++++++++++++++++++++++++++ 4 files changed, 490 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/drm_vma_manager.c create mode 100644 include/drm/drm_vma_manager.h diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 7d1278e7a434..87e22ecd9281 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -2212,6 +2212,12 @@ void intel_crt_init(struct drm_device *dev) !Iinclude/drm/drm_rect.h !Edrivers/gpu/drm/drm_rect.c + + VMA Offset Manager +!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager +!Edrivers/gpu/drm/drm_vma_manager.c +!Iinclude/drm/drm_vma_manager.h + diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 801bcafa3028..d943b94afc90 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -13,7 +13,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_crtc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ - drm_rect.o + drm_rect.o drm_vma_manager.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c new file mode 100644 index 000000000000..b966cea95f11 --- /dev/null +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2012 David Airlie + * Copyright (c) 2013 David Herrmann + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: vma offset manager + * + * The vma-manager is responsible to map arbitrary driver-dependent memory + * regions into the linear user address-space. It provides offsets to the + * caller which can then be used on the address_space of the drm-device. It + * takes care to not overlap regions, size them appropriately and to not + * confuse mm-core by inconsistent fake vm_pgoff fields. + * Drivers shouldn't use this for object placement in VMEM. This manager should + * only be used to manage mappings into linear user-space VMs. + * + * We use drm_mm as backend to manage object allocations. But it is highly + * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to + * speed up offset lookups. + * + * You must not use multiple offset managers on a single address_space. + * Otherwise, mm-core will be unable to tear down memory mappings as the VM will + * no longer be linear. Please use VM_NONLINEAR in that case and implement your + * own offset managers. + * + * This offset manager works on page-based addresses. That is, every argument + * and return code (with the exception of drm_vma_node_offset_addr()) is given + * in number of pages, not number of bytes. That means, object sizes and offsets + * must always be page-aligned (as usual). + * If you want to get a valid byte-based user-space address for a given offset, + * please see drm_vma_node_offset_addr(). + */ + +/** + * drm_vma_offset_manager_init - Initialize new offset-manager + * @mgr: Manager object + * @page_offset: Offset of available memory area (page-based) + * @size: Size of available address space range (page-based) + * + * Initialize a new offset-manager. The offset and area size available for the + * manager are given as @page_offset and @size. Both are interpreted as + * page-numbers, not bytes. + * + * Adding/removing nodes from the manager is locked internally and protected + * against concurrent access. However, node allocation and destruction is left + * for the caller. While calling into the vma-manager, a given node must + * always be guaranteed to be referenced. + */ +void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, + unsigned long page_offset, unsigned long size) +{ + rwlock_init(&mgr->vm_lock); + mgr->vm_addr_space_rb = RB_ROOT; + drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size); +} +EXPORT_SYMBOL(drm_vma_offset_manager_init); + +/** + * drm_vma_offset_manager_destroy() - Destroy offset manager + * @mgr: Manager object + * + * Destroy an object manager which was previously created via + * drm_vma_offset_manager_init(). The caller must remove all allocated nodes + * before destroying the manager. Otherwise, drm_mm will refuse to free the + * requested resources. + * + * The manager must not be accessed after this function is called. 
+ */ +void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr) +{ + /* take the lock to protect against buggy drivers */ + write_lock(&mgr->vm_lock); + drm_mm_takedown(&mgr->vm_addr_space_mm); + write_unlock(&mgr->vm_lock); +} +EXPORT_SYMBOL(drm_vma_offset_manager_destroy); + +/** + * drm_vma_offset_lookup() - Find node in offset space + * @mgr: Manager object + * @start: Start address for object (page-based) + * @pages: Size of object (page-based) + * + * Find a node given a start address and object size. This returns the _best_ + * match for the given node. That is, @start may point somewhere into a valid + * region and the given node will be returned, as long as the node spans the + * whole requested area (given the size in number of pages as @pages). + * + * RETURNS: + * Returns NULL if no suitable node can be found. Otherwise, the best match + * is returned. It's the caller's responsibility to make sure the node doesn't + * get destroyed before the caller can access it. + */ +struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node; + + read_lock(&mgr->vm_lock); + node = drm_vma_offset_lookup_locked(mgr, start, pages); + read_unlock(&mgr->vm_lock); + + return node; +} +EXPORT_SYMBOL(drm_vma_offset_lookup); + +/** + * drm_vma_offset_lookup_locked() - Find node in offset space + * @mgr: Manager object + * @start: Start address for object (page-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup + * manually. See drm_vma_offset_lock_lookup() for an example. + * + * RETURNS: + * Returns NULL if no suitable node can be found. Otherwise, the best match + * is returned. + */ +struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node, *best; + struct rb_node *iter; + unsigned long offset; + + iter = mgr->vm_addr_space_rb.rb_node; + best = NULL; + + while (likely(iter)) { + node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); + offset = node->vm_node.start; + if (start >= offset) { + iter = iter->rb_right; + best = node; + if (start == offset) + break; + } else { + iter = iter->rb_left; + } + } + + /* verify that the node spans the requested area */ + if (best) { + offset = best->vm_node.start + best->vm_node.size; + if (offset < start + pages) + best = NULL; + } + + return best; +} +EXPORT_SYMBOL(drm_vma_offset_lookup_locked); + +/* internal helper to link @node into the rb-tree */ +static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node) +{ + struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node; + struct rb_node *parent = NULL; + struct drm_vma_offset_node *iter_node; + + while (likely(*iter)) { + parent = *iter; + iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); + + if (node->vm_node.start < iter_node->vm_node.start) + iter = &(*iter)->rb_left; + else if (node->vm_node.start > iter_node->vm_node.start) + iter = &(*iter)->rb_right; + else + BUG(); + } + + rb_link_node(&node->vm_rb, parent, iter); + rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb); +} + +/** + * drm_vma_offset_add() - Add offset node to manager + * @mgr: Manager object + * @node: Node to be added + * @pages: Allocation size visible to user-space (in number of pages) + * + * Add a node to the offset-manager. 
If the node was already added, this does + * nothing and return 0. @pages is the size of the object given in number of + * pages. + * After this call succeeds, you can access the offset of the node until it + * is removed again. + * + * If this call fails, it is safe to retry the operation or call + * drm_vma_offset_remove(), anyway. However, no cleanup is required in that + * case. + * + * @pages is not required to be the same size as the underlying memory object + * that you want to map. It only limits the size that user-space can map into + * their address space. + * + * RETURNS: + * 0 on success, negative error code on failure. + */ +int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node, unsigned long pages) +{ + int ret; + + write_lock(&mgr->vm_lock); + + if (drm_mm_node_allocated(&node->vm_node)) { + ret = 0; + goto out_unlock; + } + + ret = drm_mm_insert_node_generic(&mgr->vm_addr_space_mm, + &node->vm_node, pages, 0, 0); + if (ret) + goto out_unlock; + + _drm_vma_offset_add_rb(mgr, node); + +out_unlock: + write_unlock(&mgr->vm_lock); + return ret; +} +EXPORT_SYMBOL(drm_vma_offset_add); + +/** + * drm_vma_offset_remove() - Remove offset node from manager + * @mgr: Manager object + * @node: Node to be removed + * + * Remove a node from the offset manager. If the node wasn't added before, this + * does nothing. After this call returns, the offset and size will be 0 until a + * new offset is allocated via drm_vma_offset_add() again. Helper functions like + * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no + * offset is allocated. + */ +void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node) +{ + write_lock(&mgr->vm_lock); + + if (drm_mm_node_allocated(&node->vm_node)) { + rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb); + drm_mm_remove_node(&node->vm_node); + memset(&node->vm_node, 0, sizeof(node->vm_node)); + } + + write_unlock(&mgr->vm_lock); +} +EXPORT_SYMBOL(drm_vma_offset_remove); diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h new file mode 100644 index 000000000000..7ee8c4babeb9 --- /dev/null +++ b/include/drm/drm_vma_manager.h @@ -0,0 +1,202 @@ +#ifndef __DRM_VMA_MANAGER_H__ +#define __DRM_VMA_MANAGER_H__ + +/* + * Copyright (c) 2013 David Herrmann + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +struct drm_vma_offset_node { + struct drm_mm_node vm_node; + struct rb_node vm_rb; +}; + +struct drm_vma_offset_manager { + rwlock_t vm_lock; + struct rb_root vm_addr_space_rb; + struct drm_mm vm_addr_space_mm; +}; + +void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, + unsigned long page_offset, unsigned long size); +void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); + +struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages); +struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages); +int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node, unsigned long pages); +void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node); + +/** + * drm_vma_offset_exact_lookup() - Look up node by exact address + * @mgr: Manager object + * @start: Start address (page-based, not byte-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup() but does not allow any offset into the node. + * It only returns the exact object with the given start address. + * + * RETURNS: + * Node at exact start address @start. + */ +static inline struct drm_vma_offset_node * +drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node; + + node = drm_vma_offset_lookup(mgr, start, pages); + return (node && node->vm_node.start == start) ? node : NULL; +} + +/** + * drm_vma_offset_lock_lookup() - Lock lookup for extended private use + * @mgr: Manager object + * + * Lock VMA manager for extended lookups. Only *_locked() VMA function calls + * are allowed while holding this lock. All other contexts are blocked from VMA + * until the lock is released via drm_vma_offset_unlock_lookup(). + * + * Use this if you need to take a reference to the objects returned by + * drm_vma_offset_lookup_locked() before releasing this lock again. + * + * This lock must not be used for anything else than extended lookups. You must + * not call any other VMA helpers while holding this lock. + * + * Note: You're in atomic-context while holding this lock! + * + * Example: + * drm_vma_offset_lock_lookup(mgr); + * node = drm_vma_offset_lookup_locked(mgr); + * if (node) + * kref_get_unless_zero(container_of(node, sth, entr)); + * drm_vma_offset_unlock_lookup(mgr); + */ +static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_lock(&mgr->vm_lock); +} + +/** + * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use + * @mgr: Manager object + * + * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information. + */ +static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_unlock(&mgr->vm_lock); +} + +/** + * drm_vma_node_reset() - Initialize or reset node object + * @node: Node to initialize or reset + * + * Reset a node to its initial state. This must be called if @node isn't + * already cleared (eg., via kzalloc) before using it with any VMA offset + * manager. + * + * This must not be called on an already allocated node, or you will leak + * memory. 
+ */ +static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) +{ + memset(node, 0, sizeof(*node)); +} + +/** + * drm_vma_node_start() - Return start address for page-based addressing + * @node: Node to inspect + * + * Return the start address of the given node. This can be used as offset into + * the linear VM space that is provided by the VMA offset manager. Note that + * this can only be used for page-based addressing. If you need a proper offset + * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the + * drm_vma_node_offset_addr() helper instead. + * + * RETURNS: + * Start address of @node for page-based addressing. 0 if the node does not + * have an offset allocated. + */ +static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) +{ + return node->vm_node.start; +} + +/** + * drm_vma_node_size() - Return size (page-based) + * @node: Node to inspect + * + * Return the size as number of pages for the given node. This is the same size + * that was passed to drm_vma_offset_add(). If no offset is allocated for the + * node, this is 0. + * + * RETURNS: + * Size of @node as number of pages. 0 if the node does not have an offset + * allocated. + */ +static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) +{ + return node->vm_node.size; +} + +/** + * drm_vma_node_has_offset() - Check whether node is added to offset manager + * @node: Node to be checked + * + * RETURNS: + * true iff the node was previously allocated an offset and added to + * an vma offset manager. + */ +static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node) +{ + return drm_mm_node_allocated(&node->vm_node); +} + +/** + * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps + * @node: Linked offset node + * + * Same as drm_vma_node_start() but returns the address as a valid offset that + * can be used for user-space mappings during mmap(). + * This must not be called on unlinked nodes. + * + * RETURNS: + * Offset of @node for byte-based addressing. 0 if the node does not have an + * object allocated. + */ +static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) +{ + return ((__u64)node->vm_node.start) << PAGE_SHIFT; +} + +#endif /* __DRM_VMA_MANAGER_H__ */ -- cgit v1.2.3 From 0de23977cfeb5b357ec884ba15417ae118ff9e9b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 24 Jul 2013 21:07:52 +0200 Subject: drm/gem: convert to new unified vma manager Use the new vma manager instead of the old hashtable. Also convert all drivers to use the new convenience helpers. This drops all the (map_list.hash.key << PAGE_SHIFT) non-sense. Locking and access-management is exactly the same as before with an additional lock inside of the vma-manager, which strictly wouldn't be needed for gem. 
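For driver code the conversion boils down to a small, mechanical pattern. A sketch (obj is a struct drm_gem_object; both the old and the new expression appear in the diff below):

	/* before: fake offset reconstructed from the hashtable key */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;

	/* after: let the vma manager hand out and report the offset */
	ret = drm_gem_create_mmap_offset(obj);	/* now wraps drm_vma_offset_add() */
	if (ret)
		return ret;
	*offset = drm_vma_node_offset_addr(&obj->vma_node);

Since drm_vma_offset_add() is a no-op for a node that already has an offset, the old "if (!obj->map_list.map)" guards can simply be dropped, which is what most of the per-driver hunks below do.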
v2: - rebase on drm-next - init nodes via drm_vma_node_reset() in drm_gem.c v3: - fix tegra v4: - remove duplicate if (drm_vma_node_has_offset()) checks - inline now trivial drm_vma_node_offset_addr() calls v5: - skip node-reset on gem-init due to kzalloc() - do not allow mapping gem-objects with offsets (backwards compat) - remove unneccessary casts Cc: Inki Dae Cc: Rob Clark Cc: Dave Airlie Cc: Thierry Reding Signed-off-by: David Herrmann Acked-by: Patrik Jakobsson Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 89 +++++------------------------- drivers/gpu/drm/drm_gem_cma_helper.c | 16 ++---- drivers/gpu/drm/exynos/exynos_drm_gem.c | 14 ++--- drivers/gpu/drm/gma500/gem.c | 15 ++--- drivers/gpu/drm/i915/i915_gem.c | 10 ++-- drivers/gpu/drm/omapdrm/omap_gem.c | 28 +++++----- drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 49 +--------------- drivers/gpu/drm/udl/udl_gem.c | 13 ++--- drivers/gpu/host1x/drm/gem.c | 5 +- include/drm/drmP.h | 7 +-- include/uapi/drm/drm.h | 2 +- 11 files changed, 62 insertions(+), 186 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 1ad9e7ec0119..3613b50b5c26 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -37,6 +37,7 @@ #include #include #include +#include /** @file drm_gem.c * @@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev) } dev->mm_private = mm; - - if (drm_ht_create(&mm->offset_hash, 12)) { - kfree(mm); - return -ENOMEM; - } - - drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, - DRM_FILE_PAGE_OFFSET_SIZE); + drm_vma_offset_manager_init(&mm->vma_manager, + DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE); return 0; } @@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev) { struct drm_gem_mm *mm = dev->mm_private; - drm_mm_takedown(&mm->offset_manager); - drm_ht_remove(&mm->offset_hash); + drm_vma_offset_manager_destroy(&mm->vma_manager); kfree(mm); dev->mm_private = NULL; } @@ -302,12 +297,8 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list = &obj->map_list; - drm_ht_remove_item(&mm->offset_hash, &list->hash); - drm_mm_put_block(list->file_offset_node); - kfree(list->map); - list->map = NULL; + drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node); } EXPORT_SYMBOL(drm_gem_free_mmap_offset); @@ -327,54 +318,9 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; - struct drm_local_map *map; - int ret; - - /* Set the object up for mmap'ing */ - list = &obj->map_list; - list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); - if (!list->map) - return -ENOMEM; - - map = list->map; - map->type = _DRM_GEM; - map->size = obj->size; - map->handle = obj; - - /* Get a DRM GEM mmap offset allocated... 
*/ - list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, false); - - if (!list->file_offset_node) { - DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); - ret = -ENOSPC; - goto out_free_list; - } - list->file_offset_node = drm_mm_get_block(list->file_offset_node, - obj->size / PAGE_SIZE, 0); - if (!list->file_offset_node) { - ret = -ENOMEM; - goto out_free_list; - } - - list->hash.key = list->file_offset_node->start; - ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); - if (ret) { - DRM_ERROR("failed to add to map hash\n"); - goto out_free_mm; - } - - return 0; - -out_free_mm: - drm_mm_put_block(list->file_offset_node); -out_free_list: - kfree(list->map); - list->map = NULL; - - return ret; + return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node, + obj->size / PAGE_SIZE); } EXPORT_SYMBOL(drm_gem_create_mmap_offset); @@ -703,8 +649,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_local_map *map = NULL; - struct drm_hash_item *hash; + struct drm_gem_object *obj; + struct drm_vma_offset_node *node; int ret = 0; if (drm_device_is_unplugged(dev)) @@ -712,21 +658,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) mutex_lock(&dev->struct_mutex); - if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { + node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff, + vma_pages(vma)); + if (!node) { mutex_unlock(&dev->struct_mutex); return drm_mmap(filp, vma); } - map = drm_hash_entry(hash, struct drm_map_list, hash)->map; - if (!map || - ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) { - ret = -EPERM; - goto out_unlock; - } - - ret = drm_gem_mmap_obj(map->handle, map->size, vma); + obj = container_of(node, struct drm_gem_object, vma_node); + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node), vma); -out_unlock: mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index ece72a8ac245..847f09117666 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -27,11 +27,7 @@ #include #include #include - -static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) -{ - return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; -} +#include /* * __drm_gem_cma_create - Create a GEM CMA object without allocating memory @@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) { struct drm_gem_cma_object *cma_obj; - if (gem_obj->map_list.map) - drm_gem_free_mmap_offset(gem_obj); + drm_gem_free_mmap_offset(gem_obj); cma_obj = to_drm_gem_cma_obj(gem_obj); @@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, return -EINVAL; } - *offset = get_gem_mmap_offset(gem_obj); + *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); drm_gem_object_unreference(gem_obj); @@ -301,12 +296,11 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m { struct drm_gem_object *obj = &cma_obj->base; struct drm_device *dev = obj->dev; - uint64_t off = 0; + uint64_t off; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - if (obj->map_list.map) - off = (uint64_t)obj->map_list.hash.key; + off = drm_vma_node_start(&obj->vma_node); seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d", obj->name, obj->refcount.refcount.counter, diff --git 
a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 24c22a8c3364..be32db1ab290 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -10,6 +10,7 @@ */ #include +#include #include #include @@ -152,8 +153,7 @@ out: exynos_drm_fini_buf(obj->dev, buf); exynos_gem_obj->buffer = NULL; - if (obj->map_list.map) - drm_gem_free_mmap_offset(obj); + drm_gem_free_mmap_offset(obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); @@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - if (!obj->map_list.map) { - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - } + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto out; - *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; + *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); out: diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index fe1d3320ce6a..2f77bea30b11 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "psb_drv.h" int psb_gem_init_object(struct drm_gem_object *obj) @@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj) struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); /* Remove the list map if one is present */ - if (obj->map_list.map) - drm_gem_free_mmap_offset(obj); + drm_gem_free_mmap_offset(obj); drm_gem_object_release(obj); /* This must occur last as it frees up the memory of the GEM object */ @@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, /* What validation is needed here ? */ /* Make it mmapable */ - if (!obj->map_list.map) { - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - } - /* GEM should really work out the hash offsets for us */ - *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto out; + *offset = drm_vma_node_offset_addr(&obj->vma_node); out: drm_gem_object_unreference(obj); unlock: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 46bf7e3887d4..53f81b3b3424 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -26,6 +26,7 @@ */ #include +#include #include #include "i915_drv.h" #include "i915_trace.h" @@ -1428,7 +1429,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) if (obj->base.dev->dev_mapping) unmap_mapping_range(obj->base.dev->dev_mapping, - (loff_t)obj->base.map_list.hash.key<base.vma_node), obj->base.size, 1); obj->fault_mappable = false; @@ -1486,7 +1487,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->base.map_list.map) + if (drm_vma_node_has_offset(&obj->base.vma_node)) return 0; dev_priv->mm.shrinker_no_lock_stealing = true; @@ -1517,9 +1518,6 @@ out: static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) { - if (!obj->base.map_list.map) - return; - drm_gem_free_mmap_offset(&obj->base); } @@ -1558,7 +1556,7 @@ i915_gem_mmap_gtt(struct drm_file *file, if (ret) goto out; - *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; + *offset = drm_vma_node_offset_addr(&obj->base.vma_node); out: drm_gem_object_unreference(&obj->base); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c 
b/drivers/gpu/drm/omapdrm/omap_gem.c index cbcd71e6ed83..f90531fc00c9 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -20,6 +20,7 @@ #include #include +#include #include "omap_drv.h" #include "omap_dmm_tiler.h" @@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj) static uint64_t mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + int ret; + size_t size; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - if (!obj->map_list.map) { - /* Make it mmapable */ - size_t size = omap_gem_mmap_size(obj); - int ret = _drm_gem_create_mmap_offset_size(obj, size); - - if (ret) { - dev_err(dev->dev, "could not allocate mmap offset\n"); - return 0; - } + /* Make it mmapable */ + size = omap_gem_mmap_size(obj); + ret = _drm_gem_create_mmap_offset_size(obj, size); + if (ret) { + dev_err(dev->dev, "could not allocate mmap offset\n"); + return 0; } - return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; + return drm_vma_node_offset_addr(&obj->vma_node); } uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) @@ -997,12 +997,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct drm_device *dev = obj->dev; struct omap_gem_object *omap_obj = to_omap_bo(obj); - uint64_t off = 0; + uint64_t off; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - if (obj->map_list.map) - off = (uint64_t)obj->map_list.hash.key; + off = drm_vma_node_start(&obj->vma_node); seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", omap_obj->flags, obj->name, obj->refcount.refcount.counter, @@ -1309,8 +1308,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) list_del(&omap_obj->mm_list); - if (obj->map_list.map) - drm_gem_free_mmap_offset(obj); + drm_gem_free_mmap_offset(obj); /* this means the object is still pinned.. which really should * not happen. I think.. diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c index f9eb679eb79b..dbb157542f8f 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c @@ -118,52 +118,7 @@ _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; - struct drm_local_map *map; - int ret = 0; - - /* Set the object up for mmap'ing */ - list = &obj->map_list; - list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); - if (!list->map) - return -ENOMEM; - - map = list->map; - map->type = _DRM_GEM; - map->size = size; - map->handle = obj; - - /* Get a DRM GEM mmap offset allocated... 
*/ - list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - size / PAGE_SIZE, 0, 0); - - if (!list->file_offset_node) { - DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); - ret = -ENOSPC; - goto out_free_list; - } - - list->file_offset_node = drm_mm_get_block(list->file_offset_node, - size / PAGE_SIZE, 0); - if (!list->file_offset_node) { - ret = -ENOMEM; - goto out_free_list; - } - - list->hash.key = list->file_offset_node->start; - ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); - if (ret) { - DRM_ERROR("failed to add to map hash\n"); - goto out_free_mm; - } - - return 0; - -out_free_mm: - drm_mm_put_block(list->file_offset_node); -out_free_list: - kfree(list->map); - list->map = NULL; - return ret; + return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node, + size / PAGE_SIZE); } diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index ef034fa3e6f5..2a4cb2f83b36 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -223,8 +223,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj) if (obj->pages) udl_gem_put_pages(obj); - if (gem_obj->map_list.map) - drm_gem_free_mmap_offset(gem_obj); + drm_gem_free_mmap_offset(gem_obj); } /* the dumb interface doesn't work with the GEM straight MMAP @@ -247,13 +246,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, ret = udl_gem_get_pages(gobj, GFP_KERNEL); if (ret) goto out; - if (!gobj->base.map_list.map) { - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - } + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto out; - *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT; + *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: drm_gem_object_unreference(&gobj->base); diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c index c5e9a9b494c2..bc323b3dbe4d 100644 --- a/drivers/gpu/host1x/drm/gem.c +++ b/drivers/gpu/host1x/drm/gem.c @@ -108,7 +108,7 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo) unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo) { - return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT; + return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node); } struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) @@ -182,8 +182,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem) { struct tegra_bo *bo = to_tegra_bo(gem); - if (gem->map_list.map) - drm_gem_free_mmap_offset(gem); + drm_gem_free_mmap_offset(gem); drm_gem_object_release(gem); tegra_bo_destroy(gem->dev, bo); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0ab6a090a15c..4b518e05d293 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -71,6 +71,7 @@ #include #include #include +#include #include @@ -587,7 +588,6 @@ struct drm_map_list { struct drm_local_map *map; /**< mapping */ uint64_t user_token; struct drm_master *master; - struct drm_mm_node *file_offset_node; /**< fake offset */ }; /** @@ -622,8 +622,7 @@ struct drm_ati_pcigart_info { * GEM specific mm private for tracking GEM objects */ struct drm_gem_mm { - struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ - struct drm_open_hash offset_hash; /**< User token hash table for maps */ + struct drm_vma_offset_manager vma_manager; }; /** @@ -644,7 +643,7 @@ struct drm_gem_object { struct file *filp; /* Mapping info for this object */ - struct drm_map_list map_list; + struct drm_vma_offset_node vma_node; /** * Size of the object, in bytes. 
Immutable over the object's diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 238a166b9fe6..272580ca320f 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -181,7 +181,7 @@ enum drm_map_type { _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ - _DRM_GEM = 6, /**< GEM object */ + _DRM_GEM = 6, /**< GEM object (obsolete) */ }; /** -- cgit v1.2.3 From 72525b3f333de54fa0c42ef87f27861e41478f1e Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 24 Jul 2013 21:08:53 +0200 Subject: drm/ttm: convert to unified vma offset manager Use the new vma-manager infrastructure. This doesn't change any implementation details as the vma-offset-manager is nearly copied 1-to-1 from TTM. The vm_lock is moved into the offset manager so we can drop it from TTM. During lookup, we use the vma locking helpers to take a reference to the found object. In all other scenarios, locking stays the same as before. We always guarantee that drm_vma_offset_remove() is called only during destruction. Hence, helpers like drm_vma_node_offset_addr() are always safe as long as the node has a valid offset. This also drops the addr_space_offset member as it is a copy of vm_start in vma_node objects. Use the accessor functions instead. v4: - remove vm_lock - use drm_vma_offset_lock_lookup() to protect lookup (instead of vm_lock) Cc: Dave Airlie Cc: Ben Skeggs Cc: Maarten Lankhorst Cc: Martin Peres Cc: Alex Deucher Cc: Thomas Hellstrom Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_main.c | 2 +- drivers/gpu/drm/cirrus/cirrus_main.c | 2 +- drivers/gpu/drm/mgag200/mgag200_main.c | 2 +- drivers/gpu/drm/nouveau/nouveau_display.c | 2 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +- drivers/gpu/drm/qxl/qxl_object.h | 2 +- drivers/gpu/drm/qxl/qxl_release.c | 2 +- drivers/gpu/drm/radeon/radeon_object.h | 5 +- drivers/gpu/drm/ttm/ttm_bo.c | 89 ++++++------------------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 3 +- drivers/gpu/drm/ttm/ttm_bo_vm.c | 81 +++++++++++----------------- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4 +- include/drm/ttm/ttm_bo_api.h | 15 ++---- include/drm/ttm/ttm_bo_driver.h | 10 ++-- 14 files changed, 66 insertions(+), 155 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f60fd7bd1183..c195dc2abc09 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -487,7 +487,7 @@ void ast_gem_free_object(struct drm_gem_object *obj) static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) { - return bo->bo.addr_space_offset; + return drm_vma_node_offset_addr(&bo->bo.vma_node); } int ast_dumb_mmap_offset(struct drm_file *file, diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 35cbae827771..3a7a0efe3675 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -294,7 +294,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj) static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo) { - return bo->bo.addr_space_offset; + return drm_vma_node_offset_addr(&bo->bo.vma_node); } int diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 9fa5685baee0..1a75ea395b33 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -349,7 +349,7 @@ void mgag200_gem_free_object(struct drm_gem_object 
*obj) static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo) { - return bo->bo.addr_space_offset; + return drm_vma_node_offset_addr(&bo->bo.vma_node); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 708b2d1c0037..7a8caa126db6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -697,7 +697,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv, gem = drm_gem_object_lookup(dev, file_priv, handle); if (gem) { struct nouveau_bo *bo = gem->driver_private; - *poffset = bo->bo.addr_space_offset; + *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); drm_gem_object_unreference_unlocked(gem); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e72d09c068a8..86597ebf8c98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -226,7 +226,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, } rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; - rep->map_handle = nvbo->bo.addr_space_offset; + rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); rep->tile_mode = nvbo->tile_mode; rep->tile_flags = nvbo->tile_flags; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce781..af10165adb0d 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo) static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) { - return bo->tbo.addr_space_offset; + return drm_vma_node_offset_addr(&bo->tbo.vma_node); } static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5f..1a648e1da6a6 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -87,7 +87,7 @@ qxl_release_free(struct qxl_device *qdev, for (i = 0 ; i < release->bo_count; ++i) { QXL_INFO(qdev, "release %llx\n", - release->bos[i]->tbo.addr_space_offset + drm_vma_node_offset_addr(&release->bos[i]->tbo.vma_node) - DRM_FILE_OFFSET); qxl_fence_remove_release(&release->bos[i]->fence, release->id); qxl_bo_unref(&release->bos[i]); diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 49c82c480013..209b11150263 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo) * @bo: radeon object for which we query the offset * * Returns mmap offset of the object. - * - * Note: addr_space_offset is constant after ttm bo init thus isn't protected - * by any lock. 
*/ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) { - return bo->tbo.addr_space_offset; + return drm_vma_node_offset_addr(&bo->tbo.vma_node); } extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cb9dd674670c..3dc08b612292 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -615,13 +615,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - write_lock(&bdev->vm_lock); - if (likely(bo->vm_node != NULL)) { - rb_erase(&bo->vm_rb, &bdev->addr_space_rb); - drm_mm_put_block(bo->vm_node); - bo->vm_node = NULL; - } - write_unlock(&bdev->vm_lock); + drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); @@ -1129,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->resv = &bo->ttm_resv; reservation_object_init(bo->resv); atomic_inc(&bo->glob->bo_count); + drm_vma_node_reset(&bo->vma_node); ret = ttm_bo_check_placement(bo, placement); @@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) TTM_DEBUG("Swap list was clean\n"); spin_unlock(&glob->lru_lock); - BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); - write_lock(&bdev->vm_lock); - drm_mm_takedown(&bdev->addr_space_mm); - write_unlock(&bdev->vm_lock); + drm_vma_offset_manager_destroy(&bdev->vma_manager); return ret; } @@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, { int ret = -EINVAL; - rwlock_init(&bdev->vm_lock); bdev->driver = driver; memset(bdev->man, 0, sizeof(bdev->man)); @@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, if (unlikely(ret != 0)) goto out_no_sys; - bdev->addr_space_rb = RB_ROOT; - drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); - + drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, + 0x10000000); INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = NULL; @@ -1498,12 +1488,17 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - loff_t offset = (loff_t) bo->addr_space_offset; - loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + loff_t offset, holelen; if (!bdev->dev_mapping) return; - unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); + + if (drm_vma_node_has_offset(&bo->vma_node)) { + offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node); + holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + + unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); + } ttm_mem_io_free_vm(bo); } @@ -1520,31 +1515,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unmap_virtual); -static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) -{ - struct ttm_bo_device *bdev = bo->bdev; - struct rb_node **cur = &bdev->addr_space_rb.rb_node; - struct rb_node *parent = NULL; - struct ttm_buffer_object *cur_bo; - unsigned long offset = bo->vm_node->start; - unsigned long cur_offset; - - while (*cur) { - parent = *cur; - cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb); - cur_offset = cur_bo->vm_node->start; - if (offset < cur_offset) - cur = &parent->rb_left; - else if (offset > cur_offset) - cur = &parent->rb_right; - else - BUG(); - } - - rb_link_node(&bo->vm_rb, parent, cur); 
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb); -} - /** * ttm_bo_setup_vm: * @@ -1559,38 +1529,9 @@ static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - int ret; - -retry_pre_get: - ret = drm_mm_pre_get(&bdev->addr_space_mm); - if (unlikely(ret != 0)) - return ret; - - write_lock(&bdev->vm_lock); - bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, - bo->mem.num_pages, 0, 0); - - if (unlikely(bo->vm_node == NULL)) { - ret = -ENOMEM; - goto out_unlock; - } - bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, - bo->mem.num_pages, 0); - - if (unlikely(bo->vm_node == NULL)) { - write_unlock(&bdev->vm_lock); - goto retry_pre_get; - } - - ttm_bo_vm_insert_rb(bo); - write_unlock(&bdev->vm_lock); - bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; - - return 0; -out_unlock: - write_unlock(&bdev->vm_lock); - return ret; + return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, + bo->mem.num_pages); } int ttm_bo_wait(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 319cf4127c5b..7cc904d3a4d1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); - fbo->vm_node = NULL; + drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); spin_lock(&bdev->fence_lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 3df9f16b041c..8c0e2c020215 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -40,37 +41,6 @@ #define TTM_BO_VM_NUM_PREFAULT 16 -static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, - unsigned long page_start, - unsigned long num_pages) -{ - struct rb_node *cur = bdev->addr_space_rb.rb_node; - unsigned long cur_offset; - struct ttm_buffer_object *bo; - struct ttm_buffer_object *best_bo = NULL; - - while (likely(cur != NULL)) { - bo = rb_entry(cur, struct ttm_buffer_object, vm_rb); - cur_offset = bo->vm_node->start; - if (page_start >= cur_offset) { - cur = cur->rb_right; - best_bo = bo; - if (page_start == cur_offset) - break; - } else - cur = cur->rb_left; - } - - if (unlikely(best_bo == NULL)) - return NULL; - - if (unlikely((best_bo->vm_node->start + best_bo->num_pages) < - (page_start + num_pages))) - return NULL; - - return best_bo; -} - static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *) @@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + - bo->vm_node->start - vma->vm_pgoff; + drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; page_last = vma_pages(vma) + - bo->vm_node->start - vma->vm_pgoff; + drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; if (unlikely(page_offset >= bo->num_pages)) { retval = VM_FAULT_SIGBUS; @@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { .close = ttm_bo_vm_close }; +static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, + unsigned long offset, + 
unsigned long pages) +{ + struct drm_vma_offset_node *node; + struct ttm_buffer_object *bo = NULL; + + drm_vma_offset_lock_lookup(&bdev->vma_manager); + + node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); + if (likely(node)) { + bo = container_of(node, struct ttm_buffer_object, vma_node); + if (!kref_get_unless_zero(&bo->kref)) + bo = NULL; + } + + drm_vma_offset_unlock_lookup(&bdev->vma_manager); + + if (!bo) + pr_err("Could not find buffer object to map\n"); + + return bo; +} + int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) { @@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_buffer_object *bo; int ret; - read_lock(&bdev->vm_lock); - bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, - vma_pages(vma)); - if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref)) - bo = NULL; - read_unlock(&bdev->vm_lock); - - if (unlikely(bo == NULL)) { - pr_err("Could not find buffer object to map\n"); + bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); + if (unlikely(!bo)) return -EINVAL; - } driver = bo->bdev->driver; if (unlikely(!driver->verify_access)) { @@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, bool no_wait = false; bool dummy; - read_lock(&bdev->vm_lock); - bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1); - if (likely(bo != NULL)) - ttm_bo_reference(bo); - read_unlock(&bdev->vm_lock); - + bo = ttm_bo_vm_lookup(bdev, dev_offset, 1); if (unlikely(bo == NULL)) return -EFAULT; @@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, if (unlikely(ret != 0)) goto out_unref; - kmap_offset = dev_offset - bo->vm_node->start; + kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node); if (unlikely(kmap_offset >= bo->num_pages)) { ret = -EFBIG; goto out_unref; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7953d1f90b63..0e67cf41065d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, goto out_no_dmabuf; rep->handle = handle; - rep->map_handle = dma_buf->base.addr_space_offset; + rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node); rep->cur_gmr_id = handle; rep->cur_gmr_offset = 0; @@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, if (ret != 0) return -EINVAL; - *offset = out_buf->base.addr_space_offset; + *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node); vmw_dmabuf_unreference(&out_buf); return 0; } diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 8a6aa56ece52..751eaffbf0d5 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -32,12 +32,12 @@ #define _TTM_BO_API_H_ #include +#include #include #include #include #include #include -#include #include #include @@ -145,7 +145,6 @@ struct ttm_tt; * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. - * @addr_space_offset: Address space offset. * @acc_size: Accounted size for this object. * @kref: Reference count of this buffer object. When this refcount reaches * zero, the object is put on the delayed delete list. @@ -166,8 +165,7 @@ struct ttm_tt; * @swap: List head for swap LRU list. * @sync_obj: Pointer to a synchronization object. * @priv_flags: Flags describing buffer object internal state. 
- * @vm_rb: Rb node for the vm rb tree. - * @vm_node: Address space manager node. + * @vma_node: Address space manager node. * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. @@ -194,7 +192,6 @@ struct ttm_buffer_object { enum ttm_bo_type type; void (*destroy) (struct ttm_buffer_object *); unsigned long num_pages; - uint64_t addr_space_offset; size_t acc_size; /** @@ -238,13 +235,7 @@ struct ttm_buffer_object { void *sync_obj; unsigned long priv_flags; - /** - * Members protected by the bdev::vm_lock - */ - - struct rb_node vm_rb; - struct drm_mm_node *vm_node; - + struct drm_vma_offset_node vma_node; /** * Special members that are protected by the reserve lock diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 984fc2d571a1..8639c85d61c4 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -519,7 +520,7 @@ struct ttm_bo_global { * @man: An array of mem_type_managers. * @fence_lock: Protects the synchronizing members on *all* bos belonging * to this device. - * @addr_space_mm: Range manager for the device address space. + * @vma_manager: Address space manager * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. * @val_seq: Current validation sequence. @@ -537,14 +538,13 @@ struct ttm_bo_device { struct list_head device_list; struct ttm_bo_global *glob; struct ttm_bo_driver *driver; - rwlock_t vm_lock; struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; spinlock_t fence_lock; + /* - * Protected by the vm lock. + * Protected by internal locks. */ - struct rb_root addr_space_rb; - struct drm_mm addr_space_mm; + struct drm_vma_offset_manager vma_manager; /* * Protected by the global:lru lock. -- cgit v1.2.3 From 51335df9f044ccfafb029f4d7fbeb11c4526340a Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 24 Jul 2013 21:10:03 +0200 Subject: drm/vma: provide drm_vma_node_unmap() helper Instead of unmapping the nodes in TTM and GEM users manually, we provide a generic wrapper which does the correct thing for all vma-nodes. v2: remove bdev->dev_mapping test in ttm_bo_unmap_virtual_unlocked() as ttm_mem_io_free_vm() does nothing in that case (io_reserved_vm is 0). 
v4: Fix docbook comments v5: use drm_vma_node_size() Cc: Dave Airlie Cc: Maarten Lankhorst Cc: Thomas Hellstrom Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_gem.c | 6 +----- drivers/gpu/drm/ttm/ttm_bo.c | 11 +---------- include/drm/drm_vma_manager.h | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 53f81b3b3424..8673a000a373 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1427,11 +1427,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) if (!obj->fault_mappable) return; - if (obj->base.dev->dev_mapping) - unmap_mapping_range(obj->base.dev->dev_mapping, - (loff_t)drm_vma_node_offset_addr(&obj->base.vma_node), - obj->base.size, 1); - + drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); obj->fault_mappable = false; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3dc08b612292..050edfaf5b88 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1488,17 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - loff_t offset, holelen; - if (!bdev->dev_mapping) - return; - - if (drm_vma_node_has_offset(&bo->vma_node)) { - offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node); - holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; - - unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); - } + drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); ttm_mem_io_free_vm(bo); } diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 7ee8c4babeb9..22eedac046ac 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -24,6 +24,7 @@ */ #include +#include #include #include #include @@ -199,4 +200,25 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) return ((__u64)node->vm_node.start) << PAGE_SHIFT; } +/** + * drm_vma_node_unmap() - Unmap offset node + * @node: Offset node + * @file_mapping: Address space to unmap @node from + * + * Unmap all userspace mappings for a given offset node. The mappings must be + * associated with the @file_mapping address-space. If no offset exists or + * the address-space is invalid, nothing is done. + * + * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() + * is not called on this node concurrently. + */ +static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, + struct address_space *file_mapping) +{ + if (file_mapping && drm_vma_node_has_offset(node)) + unmap_mapping_range(file_mapping, + drm_vma_node_offset_addr(node), + drm_vma_node_size(node) << PAGE_SHIFT, 1); +} + #endif /* __DRM_VMA_MANAGER_H__ */ -- cgit v1.2.3 From 907b28c56ea40629aa6595ddfa414ec2fc7da41c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2013 20:36:52 +0100 Subject: drm/i915: Colocate all GT access routines in the same file Currently, the register access code is split between i915_drv.c and intel_pm.c. It only bares a superficial resemblance to the reset of the powermanagement code, so move it all into its own file. This is to ease further patches to enforce serialised register access. v2: Scan for random abuse of I915_WRITE_NOTRACE v3: Take the opportunity to rename the GT functions as uncore. 
Uncore is the term used by the hardware design (and bspec) for all functions outside of the GPU (and CPU) cores in what is also known as the System Agent. v4: Rebase onto SNB rc6 fixes Signed-off-by: Chris Wilson Reviewed-by: Ben Widawsky [danvet: Wrestle patch into applying and inline intel_uncore_early_sanitize (plus move the old comment to the new function). Also keep the _santize postfix for intel_uncore_sanitize.] [danvet: Squash in fixup spotted by Chris on irc: We need to call intel_pm_init before intel_uncore_sanitize since the later will call cancel_work on the delayed rps setup work the former initializes.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 12 +- drivers/gpu/drm/i915/i915_dma.c | 25 +- drivers/gpu/drm/i915/i915_drv.c | 271 +---------------- drivers/gpu/drm/i915/i915_drv.h | 31 +- drivers/gpu/drm/i915/i915_irq.c | 6 +- drivers/gpu/drm/i915/intel_display.c | 3 +- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_pm.c | 258 +--------------- drivers/gpu/drm/i915/intel_uncore.c | 571 +++++++++++++++++++++++++++++++++++ 10 files changed, 614 insertions(+), 565 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_uncore.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 9d1da7cceb21..b8449a84a0dc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -38,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ intel_sprite.o \ intel_opregion.o \ intel_sideband.o \ + intel_uncore.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ dvo_ivch.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9d871c7eeaee..0e904986f3e9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -989,9 +989,9 @@ static int gen6_drpc_info(struct seq_file *m) if (ret) return ret; - spin_lock_irq(&dev_priv->gt_lock); - forcewake_count = dev_priv->forcewake_count; - spin_unlock_irq(&dev_priv->gt_lock); + spin_lock_irq(&dev_priv->uncore.lock); + forcewake_count = dev_priv->uncore.forcewake_count; + spin_unlock_irq(&dev_priv->uncore.lock); if (forcewake_count) { seq_puts(m, "RC information inaccurate because somebody " @@ -1375,9 +1375,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = dev->dev_private; unsigned forcewake_count; - spin_lock_irq(&dev_priv->gt_lock); - forcewake_count = dev_priv->forcewake_count; - spin_unlock_irq(&dev_priv->gt_lock); + spin_lock_irq(&dev_priv->uncore.lock); + forcewake_count = dev_priv->uncore.forcewake_count; + spin_unlock_irq(&dev_priv->uncore.lock); seq_printf(m, "forcewake count = %u\n", forcewake_count); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1c5b39738508..8536acd0a85d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1435,22 +1435,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) #undef SEP_COMMA } -/** - * intel_early_sanitize_regs - clean up BIOS state - * @dev: DRM device - * - * This function must be called before we do any I915_READ or I915_WRITE. Its - * purpose is to clean up any state left by the BIOS that may affect us when - * reading and/or writing registers. 
- */ -static void intel_early_sanitize_regs(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (HAS_FPGA_DBG_UNCLAIMED(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); -} - /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1493,7 +1477,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); spin_lock_init(&dev_priv->backlight.lock); - spin_lock_init(&dev_priv->gt_lock); + spin_lock_init(&dev_priv->uncore.lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->modeset_restore_lock); @@ -1529,7 +1513,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto put_bridge; } - intel_early_sanitize_regs(dev); + intel_uncore_early_sanitize(dev); if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { /* The docs do not explain exactly how the calculation can be @@ -1602,8 +1586,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_detect_pch(dev); intel_irq_init(dev); - intel_gt_sanitize(dev); - intel_gt_init(dev); + intel_pm_init(dev); + intel_uncore_sanitize(dev); + intel_uncore_init(dev); /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5849b0a91b4e..01d63a0435fb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -723,7 +723,7 @@ static int i915_drm_thaw(struct drm_device *dev) { int error = 0; - intel_gt_sanitize(dev); + intel_uncore_sanitize(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); @@ -749,7 +749,7 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_gt_sanitize(dev); + intel_uncore_sanitize(dev); /* * Platforms with opregion should have sane BIOS, older ones (gen3 and @@ -770,140 +770,6 @@ int i915_resume(struct drm_device *dev) return 0; } -static int i8xx_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_I85X(dev)) - return -ENODEV; - - I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); - POSTING_READ(D_STATE); - - if (IS_I830(dev) || IS_845G(dev)) { - I915_WRITE(DEBUG_RESET_I830, - DEBUG_RESET_DISPLAY | - DEBUG_RESET_RENDER | - DEBUG_RESET_FULL); - POSTING_READ(DEBUG_RESET_I830); - msleep(1); - - I915_WRITE(DEBUG_RESET_I830, 0); - POSTING_READ(DEBUG_RESET_I830); - } - - msleep(1); - - I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); - POSTING_READ(D_STATE); - - return 0; -} - -static int i965_reset_complete(struct drm_device *dev) -{ - u8 gdrst; - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - return (gdrst & GRDOM_RESET_ENABLE) == 0; -} - -static int i965_do_reset(struct drm_device *dev) -{ - int ret; - - /* - * Set the domains we want to reset (GRDOM/bits 2 and 3) as - * well as the reset bit (GR/bit 0). Setting the GR bit - * triggers the reset; when done, the hardware will clear it. - */ - pci_write_config_byte(dev->pdev, I965_GDRST, - GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(i965_reset_complete(dev), 500); - if (ret) - return ret; - - /* We can't reset render&media without also resetting display ... 
*/ - pci_write_config_byte(dev->pdev, I965_GDRST, - GRDOM_MEDIA | GRDOM_RESET_ENABLE); - - ret = wait_for(i965_reset_complete(dev), 500); - if (ret) - return ret; - - pci_write_config_byte(dev->pdev, I965_GDRST, 0); - - return 0; -} - -static int ironlake_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 gdrst; - int ret; - - gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); - gdrst &= ~GRDOM_MASK; - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, - gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); - if (ret) - return ret; - - /* We can't reset render&media without also resetting display ... */ - gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); - gdrst &= ~GRDOM_MASK; - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, - gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); - return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); -} - -static int gen6_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - unsigned long irqflags; - - /* Hold gt_lock across reset to prevent any register access - * with forcewake not set correctly - */ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - - /* Reset the chip */ - - /* GEN6_GDRST is not in the gt power well, no need to check - * for fifo space for the write or forcewake the chip for - * the read - */ - I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); - - /* Spin waiting for the device to ack the reset request */ - ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); - - /* If reset with a user forcewake, try to restore, otherwise turn it off */ - if (dev_priv->forcewake_count) - dev_priv->gt.force_wake_get(dev_priv); - else - dev_priv->gt.force_wake_put(dev_priv); - - /* Restore fifo count */ - dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); - return ret; -} - -int intel_gpu_reset(struct drm_device *dev) -{ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: return gen6_do_reset(dev); - case 5: return ironlake_do_reset(dev); - case 4: return i965_do_reset(dev); - case 2: return i8xx_do_reset(dev); - default: return -ENODEV; - } -} - /** * i915_reset - reset chip after a hang * @dev: drm device to reset @@ -1233,136 +1099,3 @@ module_exit(i915_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); - -/* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(dev_priv, reg) \ - ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) -static void -ilk_dummy_write(struct drm_i915_private *dev_priv) -{ - /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up - * the chip from rc6 before touching it for real. MI_MODE is masked, - * hence harmless to write 0 into. 
*/ - I915_WRITE_NOTRACE(MI_MODE, 0); -} - -static void -hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) -{ - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unknown unclaimed register before writing to %x\n", - reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } -} - -static void -hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) -{ - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed write to %x\n", reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } -} - -#define __i915_read(x, y) \ -u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ - unsigned long irqflags; \ - u##x val = 0; \ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - if (dev_priv->forcewake_count == 0) \ - dev_priv->gt.force_wake_get(dev_priv); \ - val = read##y(dev_priv->regs + reg); \ - if (dev_priv->forcewake_count == 0) \ - dev_priv->gt.force_wake_put(dev_priv); \ - } else { \ - val = read##y(dev_priv->regs + reg); \ - } \ - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ - trace_i915_reg_rw(false, reg, val, sizeof(val)); \ - return val; \ -} - -__i915_read(8, b) -__i915_read(16, w) -__i915_read(32, l) -__i915_read(64, q) -#undef __i915_read - -#define __i915_write(x, y) \ -void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ - unsigned long irqflags; \ - u32 __fifo_ret = 0; \ - trace_i915_reg_rw(true, reg, val, sizeof(val)); \ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ - if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ - } \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - hsw_unclaimed_reg_clear(dev_priv, reg); \ - write##y(val, dev_priv->regs + reg); \ - if (unlikely(__fifo_ret)) { \ - gen6_gt_check_fifodbg(dev_priv); \ - } \ - hsw_unclaimed_reg_check(dev_priv, reg); \ - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ -} -__i915_write(8, b) -__i915_write(16, w) -__i915_write(32, l) -__i915_write(64, q) -#undef __i915_write - -static const struct register_whitelist { - uint64_t offset; - uint32_t size; - uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ -} whitelist[] = { - { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, -}; - -int i915_reg_read_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - int i; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) - break; - } - - if (i == ARRAY_SIZE(whitelist)) - return -EINVAL; - - switch (entry->size) { - case 8: - reg->val = I915_READ64(reg->offset); - break; - case 4: - reg->val = I915_READ(reg->offset); - break; - case 2: - reg->val = I915_READ16(reg->offset); - break; - case 1: - reg->val = I915_READ8(reg->offset); - break; - default: - WARN_ON(1); - return -EINVAL; - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 331c00b69f15..a55315a8d5a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -391,11 +391,20 @@ struct drm_i915_display_funcs { /* pll clock increase/decrease */ }; -struct drm_i915_gt_funcs { +struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv); void (*force_wake_put)(struct drm_i915_private *dev_priv); }; +struct intel_uncore { + spinlock_t lock; /** lock is also taken in irq contexts. */ + + struct intel_uncore_funcs funcs; + + unsigned fifo_count; + unsigned forcewake_count; +}; + #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ func(is_mobile) sep \ func(is_i85x) sep \ @@ -1045,14 +1054,7 @@ typedef struct drm_i915_private { void __iomem *regs; - struct drm_i915_gt_funcs gt; - /** gt_fifo_count and the subsequent register write are synchronized - * with dev->struct_mutex. */ - unsigned gt_fifo_count; - /** forcewake_count is protected by gt_lock */ - unsigned forcewake_count; - /** gt_lock is also taken in irq contexts. 
*/ - spinlock_t gt_lock; + struct intel_uncore uncore; struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; @@ -1670,8 +1672,14 @@ void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); -extern void intel_gt_init(struct drm_device *dev); -extern void intel_gt_sanitize(struct drm_device *dev); +extern void intel_pm_init(struct drm_device *dev); + +extern void intel_uncore_sanitize(struct drm_device *dev); +extern void intel_uncore_early_sanitize(struct drm_device *dev); +extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_reset(struct drm_device *dev); +extern void intel_uncore_clear_errors(struct drm_device *dev); +extern void intel_uncore_check_errors(struct drm_device *dev); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); @@ -2107,7 +2115,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, */ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); -int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f708e4efa1be..ee3e49cc0eb4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1307,11 +1307,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* We get interrupts on unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. */ - if (IS_HASWELL(dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed register before interrupt\n"); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } + intel_uncore_check_errors(dev); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index baaefd70cc67..b3389d74d695 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10453,8 +10453,7 @@ intel_display_capture_error_state(struct drm_device *dev) * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to * prevent the next I915_WRITE from detecting it and printing an error * message. 
*/ - if (HAS_POWER_WELL(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + intel_uncore_clear_errors(dev); return error; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3fbe80bc36bb..d9f50e368fe9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -806,7 +806,6 @@ extern void intel_init_power_well(struct drm_device *dev); extern void intel_set_power_well(struct drm_device *dev, bool enable); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); -extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 74d6c4d78360..0a5ba92a4b12 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -32,8 +32,6 @@ #include #include -#define FORCEWAKE_ACK_TIMEOUT_MS 2 - /* FBC, or Frame Buffer Compression, is a technique employed to compress the * framebuffer contents in-memory, aiming at reducing the required bandwidth * during in-memory transfers and, therefore, reduce the power packet. @@ -5289,254 +5287,6 @@ void intel_init_pm(struct drm_device *dev) } } -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) -{ - u32 gt_thread_status_mask; - - if (IS_HASWELL(dev_priv->dev)) - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; - else - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; - - /* w/a for a sporadic read returning 0 by waiting for the GT - * thread to wake up. - */ - if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) - DRM_ERROR("GT thread status wait timed out\n"); -} - -static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - -static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); - - /* WaRsForcewakeWaitTC0:snb */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); -} - -static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) -{ - u32 forcewake_ack; - - if (IS_HASWELL(dev_priv->dev)) - forcewake_ack = FORCEWAKE_ACK_HSW; - else - forcewake_ack = FORCEWAKE_MT_ACK; - - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); - - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), - 
FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); - - /* WaRsForcewakeWaitTC0:ivb,hsw */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -/* - * Generally this is called implicitly by the register read function. However, - * if some sequence requires the GT to not power down then this function should - * be called at the beginning of the sequence followed by a call to - * gen6_gt_force_wake_put() at the end of the sequence. - */ -void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (dev_priv->forcewake_count++ == 0) - dev_priv->gt.force_wake_get(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) -{ - u32 gtfifodbg; - gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); - if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, - "MMIO read or write has been dropped %x\n", gtfifodbg)) - I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); -} - -static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* something from same cacheline, but !FORCEWAKE */ - POSTING_READ(ECOBUS); - gen6_gt_check_fifodbg(dev_priv); -} - -static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); - gen6_gt_check_fifodbg(dev_priv); -} - -/* - * see gen6_gt_force_wake_get() - */ -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (--dev_priv->forcewake_count == 0) - dev_priv->gt.force_wake_put(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { - int loop = 500; - u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { - udelay(10); - fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - } - if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) - ++ret; - dev_priv->gt_fifo_count = fifo; - } - dev_priv->gt_fifo_count--; - - return ret; -} - -static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); - /* something from same cacheline, but !FORCEWAKE_VLV */ - POSTING_READ(FORCEWAKE_ACK_VLV); -} - -static void vlv_force_wake_get(struct drm_i915_private *dev_priv) -{ - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & - FORCEWAKE_KERNEL), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); - - /* WaRsForcewakeWaitTC0:vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void vlv_force_wake_put(struct 
drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); -} - -void intel_gt_sanitize(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } - - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - if (INTEL_INFO(dev)->gen >= 6) - intel_disable_gt_powersave(dev); -} - -void intel_gt_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - dev_priv->gt.force_wake_get = vlv_force_wake_get; - dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_HASWELL(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; - } else if (IS_IVYBRIDGE(dev)) { - u32 ecobus; - - /* IVB configs may use multi-threaded forcewake */ - - /* A small trick here - if the bios hasn't configured - * MT forcewake, and if the device is in RC6, then - * force_wake_mt_get will not wake the device and the - * ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT - * forcewake being disabled. - */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - dev_priv->gt.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = - __gen6_gt_force_wake_mt_put; - } else { - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); - DRM_INFO("when using vblank-synced partial screen updates.\n"); - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } - } else if (IS_GEN6(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, - intel_gen6_powersave_work); -} - int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5639,3 +5389,11 @@ int vlv_freq_opcode(int ddr_freq, int val) return val; } +void intel_pm_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, + intel_gen6_powersave_work); +} + diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c new file mode 100644 index 000000000000..97e8b1b86476 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -0,0 +1,571 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following 
conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "i915_drv.h" +#include "intel_drv.h" + +#define FORCEWAKE_ACK_TIMEOUT_MS 2 + +static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +{ + u32 gt_thread_status_mask; + + if (IS_HASWELL(dev_priv->dev)) + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; + else + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; + + /* w/a for a sporadic read returning 0 by waiting for the GT + * thread to wake up. + */ + if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + DRM_ERROR("GT thread status wait timed out\n"); +} + +static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ +} + +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + /* WaRsForcewakeWaitTC0:snb */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); +} + +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) +{ + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_MT_ACK; + + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); + + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + /* WaRsForcewakeWaitTC0:ivb,hsw */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) +{ + u32 gtfifodbg; + gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); + if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, + "MMIO read or write has been dropped %x\n", gtfifodbg)) + I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); +} + +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + /* 
something from same cacheline, but !FORCEWAKE */ + POSTING_READ(ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { + int loop = 500; + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { + udelay(10); + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + } + if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) + ++ret; + dev_priv->uncore.fifo_count = fifo; + } + dev_priv->uncore.fifo_count--; + + return ret; +} + +static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_VLV */ + POSTING_READ(FORCEWAKE_ACK_VLV); +} + +static void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & + FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); + + /* WaRsForcewakeWaitTC0:vlv */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); +} + +void intel_uncore_early_sanitize(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_FPGA_DBG_UNCLAIMED(dev)) + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +} + +void intel_uncore_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; + } else if (IS_HASWELL(dev)) { + dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; + } else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } + } else if (IS_GEN6(dev)) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } +} + +void intel_uncore_sanitize(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + __gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } + + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + intel_disable_gt_powersave(dev); +} + +/* + * Generally this is called implicitly by the register read function. However, + * if some sequence requires the GT to not power down then this function should + * be called at the beginning of the sequence followed by a call to + * gen6_gt_force_wake_put() at the end of the sequence. + */ +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (dev_priv->uncore.forcewake_count++ == 0) + dev_priv->uncore.funcs.force_wake_get(dev_priv); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +/* + * see gen6_gt_force_wake_get() + */ +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (--dev_priv->uncore.forcewake_count == 0) + dev_priv->uncore.funcs.force_wake_put(dev_priv); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +/* We give fast paths for the really cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) + +static void +ilk_dummy_write(struct drm_i915_private *dev_priv) +{ + /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up + * the chip from rc6 before touching it for real. MI_MODE is masked, + * hence harmless to write 0 into. 
*/ + I915_WRITE_NOTRACE(MI_MODE, 0); +} + +static void +hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) +{ + if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + DRM_ERROR("Unknown unclaimed register before writing to %x\n", + reg); + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + } +} + +static void +hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) +{ + if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + DRM_ERROR("Unclaimed write to %x\n", reg); + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + } +} + +#define __i915_read(x, y) \ +u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ + unsigned long irqflags; \ + u##x val = 0; \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + if (IS_GEN5(dev_priv->dev)) \ + ilk_dummy_write(dev_priv); \ + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + if (dev_priv->uncore.forcewake_count == 0) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv); \ + val = read##y(dev_priv->regs + reg); \ + if (dev_priv->uncore.forcewake_count == 0) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv); \ + } else { \ + val = read##y(dev_priv->regs + reg); \ + } \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ + trace_i915_reg_rw(false, reg, val, sizeof(val)); \ + return val; \ +} + +__i915_read(8, b) +__i915_read(16, w) +__i915_read(32, l) +__i915_read(64, q) +#undef __i915_read + +#define __i915_write(x, y) \ +void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ + unsigned long irqflags; \ + u32 __fifo_ret = 0; \ + trace_i915_reg_rw(true, reg, val, sizeof(val)); \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ + } \ + if (IS_GEN5(dev_priv->dev)) \ + ilk_dummy_write(dev_priv); \ + hsw_unclaimed_reg_clear(dev_priv, reg); \ + write##y(val, dev_priv->regs + reg); \ + if (unlikely(__fifo_ret)) { \ + gen6_gt_check_fifodbg(dev_priv); \ + } \ + hsw_unclaimed_reg_check(dev_priv, reg); \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ +} +__i915_write(8, b) +__i915_write(16, w) +__i915_write(32, l) +__i915_write(64, q) +#undef __i915_write + +static const struct register_whitelist { + uint64_t offset; + uint32_t size; + uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ +} whitelist[] = { + { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, +}; + +int i915_reg_read_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reg_read *reg = data; + struct register_whitelist const *entry = whitelist; + int i; + + for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { + if (entry->offset == reg->offset && + (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + break; + } + + if (i == ARRAY_SIZE(whitelist)) + return -EINVAL; + + switch (entry->size) { + case 8: + reg->val = I915_READ64(reg->offset); + break; + case 4: + reg->val = I915_READ(reg->offset); + break; + case 2: + reg->val = I915_READ16(reg->offset); + break; + case 1: + reg->val = I915_READ8(reg->offset); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + return 0; +} + +static int i8xx_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_I85X(dev)) + return -ENODEV; + + I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + if (IS_I830(dev) || IS_845G(dev)) { + I915_WRITE(DEBUG_RESET_I830, + DEBUG_RESET_DISPLAY | + DEBUG_RESET_RENDER | + DEBUG_RESET_FULL); + POSTING_READ(DEBUG_RESET_I830); + msleep(1); + + I915_WRITE(DEBUG_RESET_I830, 0); + POSTING_READ(DEBUG_RESET_I830); + } + + msleep(1); + + I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + return 0; +} + +static int i965_reset_complete(struct drm_device *dev) +{ + u8 gdrst; + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} + +static int i965_do_reset(struct drm_device *dev) +{ + int ret; + + /* + * Set the domains we want to reset (GRDOM/bits 2 and 3) as + * well as the reset bit (GR/bit 0). Setting the GR bit + * triggers the reset; when done, the hardware will clear it. + */ + pci_write_config_byte(dev->pdev, I965_GDRST, + GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for(i965_reset_complete(dev), 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... */ + pci_write_config_byte(dev->pdev, I965_GDRST, + GRDOM_MEDIA | GRDOM_RESET_ENABLE); + + ret = wait_for(i965_reset_complete(dev), 500); + if (ret) + return ret; + + pci_write_config_byte(dev->pdev, I965_GDRST, 0); + + return 0; +} + +static int ironlake_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 gdrst; + int ret; + + gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + gdrst &= ~GRDOM_MASK; + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, + gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... 
*/ + gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + gdrst &= ~GRDOM_MASK; + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, + gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); + return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); +} + +static int gen6_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + unsigned long irqflags; + + /* Hold uncore.lock across reset to prevent any register access + * with forcewake not set correctly + */ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* Reset the chip */ + + /* GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); + + /* Spin waiting for the device to ack the reset request */ + ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + + /* If reset with a user forcewake, try to restore, otherwise turn it off */ + if (dev_priv->uncore.forcewake_count) + dev_priv->uncore.funcs.force_wake_get(dev_priv); + else + dev_priv->uncore.funcs.force_wake_put(dev_priv); + + /* Restore fifo count */ + dev_priv->uncore.fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + return ret; +} + +int intel_gpu_reset(struct drm_device *dev) +{ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: return gen6_do_reset(dev); + case 5: return ironlake_do_reset(dev); + case 4: return i965_do_reset(dev); + case 2: return i8xx_do_reset(dev); + default: return -ENODEV; + } +} + +void intel_uncore_clear_errors(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_FPGA_DBG_UNCLAIMED(dev)) + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +} + +void intel_uncore_check_errors(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_FPGA_DBG_UNCLAIMED(dev) && + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + DRM_ERROR("Unclaimed register before interrupt\n"); + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + } +} -- cgit v1.2.3 From 6af5d92f909796cb706f3b9efefd75cb0f5afcff Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2013 20:36:53 +0100 Subject: drm/i915: Use a private interface for register access within GT The GT functions for enabling register access also need to occasionally write to and read from registers. To avoid the potential recursion as we modify the public interface to be stricter, introduce a private register access API for the GT functions. v2: Rebase v3: Rebase onto uncore v4: Use raw interfaces consistently so that we only use the low-level readN functions from a single location. 
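The idea, as a minimal sketch (the names, the ack_reg parameter and the retry budget below are placeholders for illustration, not the driver's actual API): the public accessors take the uncore spinlock and may grab a forcewake reference on the caller's behalf, while the forcewake helpers themselves have to poll an ACK register, so they go through a raw readl()-style wrapper that does neither and therefore cannot recurse into the very path they implement.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch only: a "raw" accessor is plain MMIO, no locking, no forcewake. */
static inline u32 raw_read32(void __iomem *regs, u32 reg)
{
        return readl(regs + reg);
}

/* A forcewake helper polls its ACK bit through the raw accessor; calling
 * the public locked accessor from here would recurse back into the
 * forcewake handling it is implementing. */
static int forcewake_wait_for_ack(void __iomem *regs, u32 ack_reg)
{
        int retries = 500;      /* arbitrary budget for the sketch */

        while (!(raw_read32(regs, ack_reg) & 1) && retries-- > 0)
                udelay(10);

        return retries < 0 ? -ETIMEDOUT : 0;
}

In the patch below this role is filled by the __raw_i915_read##x()/__raw_i915_write##x() macros, with the public i915_read##x()/i915_write##x() wrappers layering the uncore lock, the forcewake reference counting and the tracepoints on top.
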
Signed-off-by: Chris Wilson Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 22 +++--- drivers/gpu/drm/i915/intel_uncore.c | 136 +++++++++++++++++++++--------------- 2 files changed, 90 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a55315a8d5a3..cf40bb16bb37 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2133,22 +2133,20 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, int vlv_gpu_freq(int ddr_freq, int val); int vlv_freq_opcode(int ddr_freq, int val); -#define __i915_read(x, y) \ +#define __i915_read(x) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); - -__i915_read(8, b) -__i915_read(16, w) -__i915_read(32, l) -__i915_read(64, q) +__i915_read(8) +__i915_read(16) +__i915_read(32) +__i915_read(64) #undef __i915_read -#define __i915_write(x, y) \ +#define __i915_write(x) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); - -__i915_write(8, b) -__i915_write(16, w) -__i915_write(32, l) -__i915_write(64, q) +__i915_write(8) +__i915_write(16) +__i915_write(32) +__i915_write(64) #undef __i915_write #define I915_READ8(reg) i915_read8(dev_priv, (reg)) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 97e8b1b86476..228bc7a3f373 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -26,6 +26,21 @@ #define FORCEWAKE_ACK_TIMEOUT_MS 2 +#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) +#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) +#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) +#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) +#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) + + static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) { u32 gt_thread_status_mask; @@ -38,26 +53,28 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) /* w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. 
*/ - if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) DRM_ERROR("GT thread status wait timed out\n"); } static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + /* something from same cacheline, but !FORCEWAKE */ + __raw_posting_read(dev_priv, ECOBUS); } static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + __raw_i915_write32(dev_priv, FORCEWAKE, 1); + /* something from same cacheline, but !FORCEWAKE */ + __raw_posting_read(dev_priv, ECOBUS); - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); @@ -67,9 +84,9 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); + __raw_posting_read(dev_priv, ECOBUS); } static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) @@ -81,15 +98,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) else forcewake_ack = FORCEWAKE_MT_ACK; - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, + if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MT, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); + __raw_posting_read(dev_priv, ECOBUS); - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), + if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); @@ -100,25 +118,27 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) { u32 gtfifodbg; - gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); + + gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, "MMIO read or write has been dropped %x\n", gtfifodbg)) - I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); + __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); } static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE, 0); + __raw_i915_write32(dev_priv, FORCEWAKE, 0); /* something from same cacheline, but !FORCEWAKE */ 
- POSTING_READ(ECOBUS); + __raw_posting_read(dev_priv, ECOBUS); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MT, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); + __raw_posting_read(dev_priv, ECOBUS); gen6_gt_check_fifodbg(dev_priv); } @@ -128,10 +148,10 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { int loop = 500; - u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { udelay(10); - fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); } if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) ++ret; @@ -144,26 +164,28 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_VLV */ - POSTING_READ(FORCEWAKE_ACK_VLV); + __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); } static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) & FORCEWAKE_KERNEL), FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); @@ -174,8 +196,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); @@ -186,7 +209,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (HAS_FPGA_DBG_UNCLAIMED(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } void intel_uncore_init(struct drm_device *dev) @@ -213,7 +236,7 @@ void 
intel_uncore_init(struct drm_device *dev) */ mutex_lock(&dev->struct_mutex); __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); + ecobus = __raw_i915_read32(dev_priv, ECOBUS); __gen6_gt_force_wake_mt_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -295,17 +318,17 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up * the chip from rc6 before touching it for real. MI_MODE is masked, * hence harmless to write 0 into. */ - I915_WRITE_NOTRACE(MI_MODE, 0); + __raw_i915_write32(dev_priv, MI_MODE, 0); } static void hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) { if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } @@ -313,13 +336,13 @@ static void hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) { if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { DRM_ERROR("Unclaimed write to %x\n", reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } -#define __i915_read(x, y) \ +#define __i915_read(x) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ unsigned long irqflags; \ u##x val = 0; \ @@ -329,24 +352,24 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ if (dev_priv->uncore.forcewake_count == 0) \ dev_priv->uncore.funcs.force_wake_get(dev_priv); \ - val = read##y(dev_priv->regs + reg); \ + val = __raw_i915_read##x(dev_priv, reg); \ if (dev_priv->uncore.forcewake_count == 0) \ dev_priv->uncore.funcs.force_wake_put(dev_priv); \ } else { \ - val = read##y(dev_priv->regs + reg); \ + val = __raw_i915_read##x(dev_priv, reg); \ } \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } -__i915_read(8, b) -__i915_read(16, w) -__i915_read(32, l) -__i915_read(64, q) +__i915_read(8) +__i915_read(16) +__i915_read(32) +__i915_read(64) #undef __i915_read -#define __i915_write(x, y) \ +#define __i915_write(x) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ unsigned long irqflags; \ u32 __fifo_ret = 0; \ @@ -358,17 +381,17 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (IS_GEN5(dev_priv->dev)) \ ilk_dummy_write(dev_priv); \ hsw_unclaimed_reg_clear(dev_priv, reg); \ - write##y(val, dev_priv->regs + reg); \ + __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ hsw_unclaimed_reg_check(dev_priv, reg); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ } -__i915_write(8, b) -__i915_write(16, w) -__i915_write(32, l) -__i915_write(64, q) +__i915_write(8) +__i915_write(16) +__i915_write(32) +__i915_write(64) #undef __i915_write static const struct register_whitelist { @@ -521,10 +544,10 @@ static int gen6_do_reset(struct drm_device *dev) * for fifo space for the write or forcewake the chip for * the read */ - I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); + __raw_i915_write32(dev_priv, GEN6_GDRST, 
GEN6_GRDOM_FULL); /* Spin waiting for the device to ack the reset request */ - ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->uncore.forcewake_count) @@ -533,7 +556,7 @@ static int gen6_do_reset(struct drm_device *dev) dev_priv->uncore.funcs.force_wake_put(dev_priv); /* Restore fifo count */ - dev_priv->uncore.fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); return ret; @@ -555,8 +578,9 @@ void intel_uncore_clear_errors(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + /* XXX needs spinlock around caller's grouping */ if (HAS_FPGA_DBG_UNCLAIMED(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } void intel_uncore_check_errors(struct drm_device *dev) @@ -564,8 +588,8 @@ void intel_uncore_check_errors(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (HAS_FPGA_DBG_UNCLAIMED(dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { DRM_ERROR("Unclaimed register before interrupt\n"); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } -- cgit v1.2.3 From dba8e41f2be04de58eadf78f524b3f981bf438c2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2013 20:36:54 +0100 Subject: drm/i915: Use the common register access functions for NOTRACE variants Detangle the confusion that NOTRACE variants of the register read/write routines were directly using the raw register access. We need for those routines to reuse the common code for serializing register access and ensuring the correct register power states. This is only possible now that the only routines that required raw access use their own API. 
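As a rough sketch of the resulting pattern (names below are illustrative, not the driver's own), a single serialized accessor takes a trace flag and both the traced and NOTRACE macros expand to it, so the raw MMIO only ever happens behind the shared lock:

	/* Illustrative sketch only: one locked accessor, two thin macros.
	 * Assumes linux/spinlock.h and linux/io.h; the macros expect a local
	 * 'priv' in scope, much like the driver's dev_priv convention. */
	struct example_priv {
		spinlock_t lock;
		void __iomem *regs;
	};

	static u32 example_read32(struct example_priv *priv, u32 reg, bool trace)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&priv->lock, flags);	/* common serialization */
		val = readl(priv->regs + reg);		/* raw access in one place */
		spin_unlock_irqrestore(&priv->lock, flags);

		if (trace)
			trace_example_reg_read(reg, val);	/* hypothetical tracepoint */
		return val;
	}

	#define EXAMPLE_READ(reg)		example_read32(priv, (reg), true)
	#define EXAMPLE_READ_NOTRACE(reg)	example_read32(priv, (reg), false)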
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 28 ++++++++++++++-------------- drivers/gpu/drm/i915/intel_uncore.c | 8 ++++---- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cf40bb16bb37..637e541c3b9d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2134,7 +2134,7 @@ int vlv_gpu_freq(int ddr_freq, int val); int vlv_freq_opcode(int ddr_freq, int val); #define __i915_read(x) \ - u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); + u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); __i915_read(8) __i915_read(16) __i915_read(32) @@ -2142,28 +2142,28 @@ __i915_read(64) #undef __i915_read #define __i915_write(x) \ - void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); + void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); __i915_write(8) __i915_write(16) __i915_write(32) __i915_write(64) #undef __i915_write -#define I915_READ8(reg) i915_read8(dev_priv, (reg)) -#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) +#define I915_READ8(reg) i915_read8(dev_priv, (reg), true) +#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) -#define I915_READ16(reg) i915_read16(dev_priv, (reg)) -#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) -#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) +#define I915_READ16(reg) i915_read16(dev_priv, (reg), true) +#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) +#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) +#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) -#define I915_READ(reg) i915_read32(dev_priv, (reg)) -#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) -#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) +#define I915_READ(reg) i915_read32(dev_priv, (reg), true) +#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) +#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) +#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) -#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) -#define I915_READ64(reg) i915_read64(dev_priv, (reg)) +#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) +#define I915_READ64(reg) i915_read64(dev_priv, (reg), true) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 228bc7a3f373..2dcf682d8dad 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -343,7 +343,7 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) } #define __i915_read(x) \ -u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ +u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ unsigned long irqflags; \ u##x val = 0; \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ @@ -359,7 +359,7 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ val = __raw_i915_read##x(dev_priv, reg); \ } \ spin_unlock_irqrestore(&dev_priv->uncore.lock, 
irqflags); \ - trace_i915_reg_rw(false, reg, val, sizeof(val)); \ + if (trace) trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } @@ -370,10 +370,10 @@ __i915_read(64) #undef __i915_read #define __i915_write(x) \ -void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ +void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ unsigned long irqflags; \ u32 __fifo_ret = 0; \ - trace_i915_reg_rw(true, reg, val, sizeof(val)); \ + if (trace) trace_i915_reg_rw(true, reg, val, sizeof(val)); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ -- cgit v1.2.3 From a7f31ee0b00203fcf47fb74a1d61a1c9be8d142e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2013 20:36:55 +0100 Subject: drm/i915: Squash gen lookup through multiple indirections inside GT access The INTEL_INFO() macro extracts the dev_private pointer from the device, so passing in the dev_private->dev is a long winded circumlocution. v2: rebase onto uncore Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2dcf682d8dad..89bb9da377fc 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -347,7 +347,7 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ unsigned long irqflags; \ u##x val = 0; \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - if (IS_GEN5(dev_priv->dev)) \ + if (dev_priv->info->gen == 5) \ ilk_dummy_write(dev_priv); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ if (dev_priv->uncore.forcewake_count == 0) \ @@ -378,7 +378,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - if (IS_GEN5(dev_priv->dev)) \ + if (dev_priv->info->gen == 5) \ ilk_dummy_write(dev_priv); \ hsw_unclaimed_reg_clear(dev_priv, reg); \ __raw_i915_write##x(dev_priv, reg, val); \ -- cgit v1.2.3 From ed71f1b48e95408d0b3ded014a15fb9d52ac5a86 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2013 20:36:56 +0100 Subject: drm/i915: Convert the register access tracepoint to be conditional The TRACE_EVENT_CONDITION is supposed to generate more efficient code than if (cond) trace(), which is what we are currently using inside the register access functions. 
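For reference, the general shape of a conditional tracepoint, sketched here with a placeholder event and fields (the real i915_reg_rw conversion follows in the diff): the condition moves inside the event definition, so a disabled event still collapses to the usual static-key no-op at the call site and the accessors no longer evaluate a per-call if.

	/* Placeholder sketch of the TRACE_EVENT_CONDITION pattern, as it would
	 * appear in the driver's trace header. */
	TRACE_EVENT_CONDITION(example_reg_read,
		TP_PROTO(u32 reg, u64 val, bool trace),
		TP_ARGS(reg, val, trace),

		TP_CONDITION(trace),	/* event only records when this is true */

		TP_STRUCT__entry(
			__field(u32, reg)
			__field(u64, val)
		),
		TP_fast_assign(
			__entry->reg = reg;
			__entry->val = val;
		),
		TP_printk("reg=0x%x, val=0x%llx",
			  __entry->reg, (unsigned long long)__entry->val)
	);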
v2: Rebase onto uncore Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_trace.h | 8 +++++--- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0e904986f3e9..ed72fe08217c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1004,7 +1004,7 @@ static int gen6_drpc_info(struct seq_file *m) } gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); - trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); + trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7d283b5fcbf9..2933e2ffeaa4 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -406,10 +406,12 @@ TRACE_EVENT(i915_flip_complete, TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) ); -TRACE_EVENT(i915_reg_rw, - TP_PROTO(bool write, u32 reg, u64 val, int len), +TRACE_EVENT_CONDITION(i915_reg_rw, + TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), - TP_ARGS(write, reg, val, len), + TP_ARGS(write, reg, val, len, trace), + + TP_CONDITION(trace), TP_STRUCT__entry( __field(u64, val) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 89bb9da377fc..8f5bc869c023 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -359,7 +359,7 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ val = __raw_i915_read##x(dev_priv, reg); \ } \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ - if (trace) trace_i915_reg_rw(false, reg, val, sizeof(val)); \ + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val; \ } @@ -373,7 +373,7 @@ __i915_read(64) void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ unsigned long irqflags; \ u32 __fifo_ret = 0; \ - if (trace) trace_i915_reg_rw(true, reg, val, sizeof(val)); \ + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ -- cgit v1.2.3 From c20e835586c0e4d08f891362b3c829d45ef45f9d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 Jul 2013 22:40:23 +0200 Subject: drm/i915: fix the racy object accounting Just use a spinlock to protect them. v2: Rebase onto the new object create refcount fix patch. v3: Don't kill dev_priv->mm.object_memory as requested by Chris and hence just use a spinlock instead of atomic_t. 
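The fix boils down to the classic pattern below, sketched with made-up names: a dedicated spinlock, initialized once at load time, guards the pair of counters so concurrent object creation and destruction cannot interleave their read-modify-write updates.

	/* Sketch: plain accounting counters protected by their own spinlock. */
	struct example_obj_stats {
		spinlock_t lock;	/* spin_lock_init(&stats->lock) at driver load */
		size_t memory;
		u32 count;
	};

	static void example_info_add_obj(struct example_obj_stats *stats, size_t size)
	{
		spin_lock(&stats->lock);
		stats->count++;
		stats->memory += size;
		spin_unlock(&stats->lock);
	}

	static void example_info_remove_obj(struct example_obj_stats *stats, size_t size)
	{
		spin_lock(&stats->lock);
		stats->count--;
		stats->memory -= size;
		spin_unlock(&stats->lock);
	}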
Cc: Chris Wilson Reviewed-by: Chris Wilson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67287 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8536acd0a85d..f48f1c476977 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1478,6 +1478,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->gpu_error.lock); spin_lock_init(&dev_priv->backlight.lock); spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->modeset_restore_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 637e541c3b9d..82ea281b2182 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -935,6 +935,7 @@ struct i915_gem_mm { struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; /* accounting, useful for userland debugging */ + spinlock_t object_stat_lock; size_t object_memory; u32 object_count; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eceab96d76f5..35d17fb1b89b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -75,15 +75,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) { + spin_lock(&dev_priv->mm.object_stat_lock); dev_priv->mm.object_count++; dev_priv->mm.object_memory += size; + spin_unlock(&dev_priv->mm.object_stat_lock); } static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size) { + spin_lock(&dev_priv->mm.object_stat_lock); dev_priv->mm.object_count--; dev_priv->mm.object_memory -= size; + spin_unlock(&dev_priv->mm.object_stat_lock); } static int -- cgit v1.2.3 From 3b27af3560f3cfe4e09171024515fa304ebae93b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 25 Jul 2013 16:22:31 +0300 Subject: drm/i915: dvo_ch7xxx: fix vsync polarity setting This fixes a typo which set the wrong vsync and possibly also hsync polarity for any modes with positive vsync polarity. Signed-off-by: Imre Deak Cc: Jesse Barnes Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/dvo_ch7xxx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 757e0fa11043..af42e94f6846 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo, idf |= CH7xxx_IDF_HSP; if (mode->flags & DRM_MODE_FLAG_PVSYNC) - idf |= CH7xxx_IDF_HSP; + idf |= CH7xxx_IDF_VSP; ch7xxx_writeb(dvo, CH7xxx_IDF, idf); } -- cgit v1.2.3 From aed2c03c8d96ea471b86761129c213e05ab6fbef Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Fri, 26 Jul 2013 12:09:32 +0200 Subject: drm/gem: fix mmap vma size calculations The VMA manager is page-size based so drm_vma_node_size() returns the size in pages. However, drm_gem_mmap_obj() requires the size in bytes. Apply PAGE_SHIFT so we no longer get EINVAL during mmaps due to too small buffers. 
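The unit mismatch is easy to see in miniature (a sketch, assuming 4 KiB pages and illustrative names): the VMA manager hands back a size in pages, and the byte count the mmap path compares against vma->vm_end - vma->vm_start is that value shifted left by PAGE_SHIFT.

	/* Sketch roughly mirroring the mmap size check: the object size must be
	 * converted from pages to bytes before comparing with the vma length. */
	static int example_mmap_size_check(unsigned long node_pages,
					   unsigned long vma_len_bytes)
	{
		unsigned long obj_bytes = node_pages << PAGE_SHIFT;	/* e.g. 4 << 12 = 16384 */

		/* Passing node_pages directly makes any real mapping look larger
		 * than the object, so the check below rejects the mmap. */
		if (vma_len_bytes > obj_bytes)
			return -EINVAL;
		return 0;
	}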
This bug was introduced in commit: 0de23977cfeb5b357ec884ba15417ae118ff9e9b "drm/gem: convert to new unified vma manager" Fixes i915 gtt mmap failure reported by Sedat Dilek in: Re: linux-next: Tree for Jul 25 [ call-trace: drm | drm-intel related? ] Cc: Daniel Vetter Cc: Chris Wilson Signed-off-by: David Herrmann Reported-by: Sedat Dilek Tested-by: Sedat Dilek Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 3613b50b5c26..1f7657286f04 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -666,7 +666,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) } obj = container_of(node, struct drm_gem_object, vma_node); - ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node), vma); + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From b8f102e8bf71cacf33326360fdf9dcfd1a63925b Mon Sep 17 00:00:00 2001 From: Egbert Eich Date: Fri, 26 Jul 2013 14:14:24 +0200 Subject: drm/i915: Add messages useful for HPD storm detection debugging (v2) For HPD storm detection we now mask out individual interrupt source bits. We have already seen a case where HPD interrupt enable bits were assigned to the wrong pins. To track these conditions more easily add some debugging messages. v2: Spelling fixes as suggested by Jani Nikula Signed-off-by: Egbert Eich Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ee3e49cc0eb4..6a1c207a296b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -919,6 +919,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { + WARN(((hpd[i] & hotplug_trigger) && + dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), + "Received HPD interrupt although disabled\n"); + if (!(hpd[i] & hotplug_trigger) || dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) continue; @@ -929,6 +933,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; dev_priv->hpd_stats[i].hpd_cnt = 0; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; dev_priv->hpd_event_bits &= ~(1 << i); @@ -936,6 +941,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, storm_detected = true; } else { dev_priv->hpd_stats[i].hpd_cnt++; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, + dev_priv->hpd_stats[i].hpd_cnt); } } -- cgit v1.2.3 From bc86625a4ff7574d4d4dba79723457711eb784e0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 21 Jul 2013 16:00:03 +0100 Subject: drm/i915: Retry DP aux_ch communications with a different clock after failure The w/a db makes the recommendation to both use a non-default value for the initial clock and then to retry with an alternative clock for Haswell with the Lakeport PCH. "On LPT:H, use a divider value of 63 decimal (03Fh). If there is a failure, retry at least three times with 63, then retry at least three times with 72 decimal (048h)." 
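One way to structure that recommendation, sketched with hypothetical helpers rather than the actual driver code (which follows in the diff): a lookup returns divider candidates by index and 0 once the list is exhausted, so the transfer path simply walks indices, retrying a few times at each divider.

	/* Sketch: walk divider candidates, retrying each a few times. */
	static u32 example_aux_divider(int index)
	{
		switch (index) {
		case 0: return 63;	/* 0x3f, first choice per the w/a database */
		case 1: return 72;	/* 0x48, fallback */
		default: return 0;	/* no more candidates */
		}
	}

	static int example_aux_xfer(u32 divider);	/* hypothetical: returns 0 on success */

	static int example_aux_xfer_with_retry(void)
	{
		int clock = 0, try;
		u32 divider;

		while ((divider = example_aux_divider(clock++)) != 0) {
			for (try = 0; try < 3; try++)	/* "at least three times" */
				if (example_aux_xfer(divider) == 0)
					return 0;
		}
		return -EBUSY;	/* every divider/attempt combination failed */
	}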
Signed-off-by: Chris Wilson Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 92 +++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c6996ced2e5f..4a7ba5ea9ee3 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -276,7 +276,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) return status; } -static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp) +static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, + int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -290,22 +291,27 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp) * clock divider. */ if (IS_VALLEYVIEW(dev)) { - return 100; + return index ? 0 : 100; } else if (intel_dig_port->port == PORT_A) { + if (index) + return 0; if (HAS_DDI(dev)) - return DIV_ROUND_CLOSEST( - intel_ddi_get_cdclk_freq(dev_priv), 2000); + return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); else if (IS_GEN6(dev) || IS_GEN7(dev)) return 200; /* SNB & IVB eDP input clock at 400Mhz */ else return 225; /* eDP input clock at 450Mhz */ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { /* Workaround for non-ULT HSW */ - return 74; + switch (index) { + case 0: return 63; + case 1: return 72; + default: return 0; + } } else if (HAS_PCH_SPLIT(dev)) { - return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); } else { - return intel_hrawclk(dev) / 2; + return index ? 0 :intel_hrawclk(dev) / 2; } } @@ -319,10 +325,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t ch_data = ch_ctl + 4; + uint32_t aux_clock_divider; int i, ret, recv_bytes; uint32_t status; - uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp); - int try, precharge; + int try, precharge, clock = 0; bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); /* dp aux is extremely sensitive to irq latency, hence request the @@ -353,37 +359,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } - /* Must try at least 3 times according to DP spec */ - for (try = 0; try < 5; try++) { - /* Load the send data into the aux channel data registers */ - for (i = 0; i < send_bytes; i += 4) - I915_WRITE(ch_data + i, - pack_aux(send + i, send_bytes - i)); - - /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, - DP_AUX_CH_CTL_SEND_BUSY | - (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - - status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); - - /* Clear done status and any errors */ - I915_WRITE(ch_ctl, - status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - - if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR)) - continue; + while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) + I915_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); + + /* Send the command and wait for it to complete */ + I915_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); + + /* Clear done status and any errors */ + I915_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR)) + continue; + if (status & DP_AUX_CH_CTL_DONE) + break; + } if (status & DP_AUX_CH_CTL_DONE) break; } @@ -1453,7 +1463,7 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp); + uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); int precharge = 0x3; int msg_size = 5; /* Header(4) + Message(1) */ -- cgit v1.2.3 From de51f04f06d5e4a37f8e5a2b1019eb34140480f0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 21 Jul 2013 17:23:11 +0100 Subject: drm/i915: Replace open-coded offset_in_page() Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1734825bef34..5b6d764e9bb2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -255,7 +255,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, reloc->delta += target_offset; if (use_cpu_reloc(obj)) { - uint32_t page_offset = reloc->offset & ~PAGE_MASK; + uint32_t page_offset = offset_in_page(reloc->offset); char *vaddr; ret = i915_gem_object_set_to_cpu_domain(obj, 1); @@ -284,7 +284,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, reloc->offset & PAGE_MASK); reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc->offset & ~PAGE_MASK)); + (reloc_page + offset_in_page(reloc->offset)); iowrite32(reloc->delta, reloc_entry); io_mapping_unmap_atomic(reloc_page); } -- cgit v1.2.3 From 
257a7ffcfaf68718c963db6e9978d1f4f647986b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 26 Jul 2013 08:35:42 +0200 Subject: drm/i915: fix pnv display core clock readout out MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need the correct clock to accurately assess whether we need to enable the double wide pipe mode or not. Cc: Chris Wilson Cc: Stéphane Marchesin Cc: Stuart Abercrombie Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 ++++++ drivers/gpu/drm/i915/intel_display.c | 29 ++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6caa748fa00f..3aebe5dee4df 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -61,6 +61,12 @@ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) +#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) +#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) +#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) +#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) +#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) +#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) #define GC_DISPLAY_CLOCK_MASK (7 << 4) #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b3389d74d695..3e66f05ea342 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4163,6 +4163,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) return 200000; } +static int pnv_get_display_clock_speed(struct drm_device *dev) +{ + u16 gcfgc = 0; + + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + + switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { + case GC_DISPLAY_CLOCK_267_MHZ_PNV: + return 267000; + case GC_DISPLAY_CLOCK_333_MHZ_PNV: + return 333000; + case GC_DISPLAY_CLOCK_444_MHZ_PNV: + return 444000; + case GC_DISPLAY_CLOCK_200_MHZ_PNV: + return 200000; + default: + DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); + case GC_DISPLAY_CLOCK_133_MHZ_PNV: + return 133000; + case GC_DISPLAY_CLOCK_167_MHZ_PNV: + return 167000; + } +} + static int i915gm_get_display_clock_speed(struct drm_device *dev) { u16 gcfgc = 0; @@ -9605,9 +9629,12 @@ static void intel_init_display(struct drm_device *dev) else if (IS_I915G(dev)) dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; - else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) + else if (IS_I945GM(dev) || IS_845G(dev)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; + else if (IS_PINEVIEW(dev)) + dev_priv->display.get_display_clock_speed = + pnv_get_display_clock_speed; else if (IS_I915GM(dev)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; -- cgit v1.2.3 From 86e81f0e624b55fa9f1560c3b64bc80e458c5168 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Thu, 25 Jul 2013 18:02:31 +0200 Subject: drm/mm: include required headers in drm_mm.h We need BUG_ON(), spinlock_t and standard kernel data-types so include the right headers. Subject: [drm-intel:drm-intel-nightly 154/166] include/drm/drm_mm.h:67:2: error: unknown type name 'spinlock_t' Message-ID: <51f14693.g5HGdcuw2v3m8FOd%fengguang.wu@intel.com> In case it didn't link to it correctly. Somehow this bug doesn't occur here on my machine, hmm. 
But I think fixing drm_mm.h is better than changing the include-order in drm_vma_manager.h, so this is what I did. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- include/drm/drm_mm.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index b87d05e17d46..98cb50ea6acb 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -36,7 +36,10 @@ /* * Generic range manager structs */ +#include <linux/bug.h> +#include <linux/kernel.h> #include <linux/list.h> +#include <linux/spinlock.h> #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> #endif -- cgit v1.2.3 From cd234b0bfd5ab012e42274b24aae420fa1823d58 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 2 Aug 2013 20:39:49 +0100 Subject: drm/i915: Do not dereference NULL crtc or fb until after checking Fixes regression from commit 4906557eb37b7fef84fad4304acef6dedf919880 Author: Rodrigo Vivi Date: Thu Jul 11 18:45:05 2013 -0300 drm/i915: Hook PSR functionality Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67526 Signed-off-by: Chris Wilson Cc: Rodrigo Vivi Cc: Daniel Vetter Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4a7ba5ea9ee3..d0c3f9b08387 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1545,12 +1545,21 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } + crtc = dig_port->base.base.crtc; + if (crtc == NULL) { + DRM_DEBUG_KMS("crtc not active for PSR\n"); + dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; + return false; + } + + intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { DRM_DEBUG_KMS("crtc not active for PSR\n"); dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; return false; } + obj = to_intel_framebuffer(crtc->fb)->obj; if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); -- cgit v1.2.3 From 69438e64afc7343e641afa57f6e73618e46d8984 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:36:57 +0200 Subject: drm/i915/dvo: use intel_encoder to the upcast macro More natural and will soon be even better!
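The upcast helpers are plain container_of() arithmetic; a toy version with made-up struct names shows why starting from the driver's own encoder type (one level of embedding) reads more naturally than reaching across two embeddings from the core drm object:

	/* Toy sketch of the two upcast styles; all names here are made up. */
	struct core_encoder { int id; };
	struct mid_encoder { struct core_encoder base; };	/* stands in for intel_encoder */
	struct my_dvo { struct mid_encoder base; bool panel_wants_dither; };

	/* before: upcast straight from the core object, two embeddings deep */
	static struct my_dvo *enc_to_dvo_old(struct core_encoder *encoder)
	{
		return container_of(encoder, struct my_dvo, base.base);
	}

	/* after: upcast from the wrapper type that callers already hold */
	static struct my_dvo *enc_to_dvo(struct mid_encoder *encoder)
	{
		return container_of(encoder, struct my_dvo, base);
	}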
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dvo.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 8b4ad27791f3..39cf596cc42c 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -100,9 +100,9 @@ struct intel_dvo { bool panel_wants_dither; }; -static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) +static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dvo, base.base); + return container_of(encoder, struct intel_dvo, base); } static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) @@ -123,7 +123,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -140,7 +140,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp, flags = 0; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -159,7 +159,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_disable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -171,7 +171,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder) static void intel_enable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -245,7 +245,7 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, @@ -279,7 +279,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; @@ -391,7 +391,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); if (intel_dvo->dev.dev_ops->destroy) intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); -- cgit v1.2.3 From a34703752e0b682ab4e6fccf1ce675176cf1dad2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:36:58 +0200 Subject: drm/i915/dvo: switch ->mode_fixup to ->compute_config This is the last encoder ->mode_fixup callback we have left, so convert it. Note that we want to only rip out the encoder->mode_fixup callback. But we still have the dvo_slave->mode_fixup callback. dvo is gen2 only, so we won't ever touch this again. Hence why I didn't go through all 6-7 dvo slave drivers and give them the same treatment. I'll add a note to the commit message about this when merging, presuming there's nothing else in the patch that needs to be fixed up. Reviewed-by: Rodrigo Vivi [danvet: Add note about why we keep the dvo->mode_fixup callback to answer a question from Rodrigo's review.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dvo.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 39cf596cc42c..51eadc944d9a 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -241,11 +241,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } -static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool intel_dvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { - struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, @@ -267,7 +267,9 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, } if (intel_dvo->dev.dev_ops->mode_fixup) - return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); + return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, + &pipe_config->requested_mode, + adjusted_mode); return true; } @@ -372,7 +374,6 @@ static void intel_dvo_destroy(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - .mode_fixup = intel_dvo_mode_fixup, .mode_set = intel_dvo_mode_set, }; @@ -470,6 +471,7 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->enable = intel_enable_dvo; intel_encoder->get_hw_state = intel_dvo_get_hw_state; intel_encoder->get_config = intel_dvo_get_config; + intel_encoder->compute_config = intel_dvo_compute_config; intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; /* Now, try to find a controller */ -- cgit v1.2.3 From efea6e8e49388478be405b3ae62644ef06dca9a1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:36:59 +0200 Subject: drm/i915: rip out legacy encoder->mode_fixup logic Everyone is now using our own ->compute_config callback, which means we can now also make that callback mandatory. Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3e66f05ea342..e9120843a9d7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4109,7 +4109,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } /* All interlaced capable intel hw wants timings in frames. 
Note though - * that intel_lvds_mode_fixup does some funny tricks with the crtc + * that intel_lvds_compute_config does some funny tricks with the crtc * timings, so we need to be careful not to clobber these.*/ if (!pipe_config->timings_set) drm_mode_set_crtcinfo(adjusted_mode, 0); @@ -8061,7 +8061,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct drm_encoder_helper_funcs *encoder_funcs; struct intel_encoder *encoder; struct intel_crtc_config *pipe_config; int plane_bpp, ret = -EINVAL; @@ -8106,20 +8105,8 @@ encoder_retry: if (&encoder->new_crtc->base != crtc) continue; - if (encoder->compute_config) { - if (!(encoder->compute_config(encoder, pipe_config))) { - DRM_DEBUG_KMS("Encoder config failure\n"); - goto fail; - } - - continue; - } - - encoder_funcs = encoder->base.helper_private; - if (!(encoder_funcs->mode_fixup(&encoder->base, - &pipe_config->requested_mode, - &pipe_config->adjusted_mode))) { - DRM_DEBUG_KMS("Encoder fixup failed\n"); + if (!(encoder->compute_config(encoder, pipe_config))) { + DRM_DEBUG_KMS("Encoder config failure\n"); goto fail; } } -- cgit v1.2.3 From 79fde3011fe03f4cef31e55eff607180e1c7c5fd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:00 +0200 Subject: drm/i915/dvo: use native encoder ->mode_set callback Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dvo.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 51eadc944d9a..406303b509c1 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -107,8 +107,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_dvo, base); + return enc_to_dvo(intel_attached_encoder(connector)); } static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) @@ -274,15 +273,14 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, return true; } -static void intel_dvo_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_dvo_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); - int pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + int pipe = crtc->pipe; u32 dvo_val; u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; @@ -299,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, break; } - intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, + &crtc->config.requested_mode, + adjusted_mode); /* Save the data order, since I don't know what it should be set to. 
*/ dvo_val = I915_READ(dvo_reg) & @@ -373,10 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector) kfree(connector); } -static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - .mode_set = intel_dvo_mode_set, -}; - static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = intel_dvo_dpms, .detect = intel_dvo_detect, @@ -472,6 +468,7 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->get_hw_state = intel_dvo_get_hw_state; intel_encoder->get_config = intel_dvo_get_config; intel_encoder->compute_config = intel_dvo_compute_config; + intel_encoder->mode_set = intel_dvo_mode_set; intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; /* Now, try to find a controller */ @@ -538,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_encoder_helper_add(&intel_encoder->base, - &intel_dvo_helper_funcs); - intel_connector_attach_encoder(intel_connector, intel_encoder); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able -- cgit v1.2.3 From 8aca63aae07681a0c9a2a0ebcca82ca5f7f6aa08 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:01 +0200 Subject: drm/i915/sdvo: use intel_encoder for upcast helper It's what all callers (except for the destroy callback which is called from drm core) actually want. Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c3b59b8593b9..47423f31f82b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -202,15 +202,14 @@ struct intel_sdvo_connector { u32 cur_dot_crawl, max_dot_crawl; }; -static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) +static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_sdvo, base.base); + return container_of(encoder, struct intel_sdvo, base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_sdvo, base); + return to_sdvo(intel_attached_encoder(connector)); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) @@ -1084,7 +1083,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config) static bool intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; struct drm_display_mode *mode = &pipe_config->requested_mode; @@ -1154,7 +1153,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) struct drm_display_mode *adjusted_mode = &intel_crtc->config.adjusted_mode; struct drm_display_mode *mode = &intel_crtc->config.requested_mode; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd, output_dtd; @@ -1292,7 +1291,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = 
dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; u32 tmp; @@ -1315,7 +1314,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_dtd dtd; int encoder_pixel_multiplier = 0; u32 flags = 0, sdvox; @@ -1380,7 +1379,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, static void intel_disable_sdvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u32 temp; intel_sdvo_set_active_outputs(intel_sdvo, 0); @@ -1422,7 +1421,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); u32 temp; bool input1, input2; @@ -1583,7 +1582,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); @@ -2190,7 +2189,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, -- cgit v1.2.3 From cd91ef23c426fe5aeee6ca8090551547b3a8795e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:02 +0200 Subject: drm/i915/tv: Use native encoder->mode_set callback Also switch to intel_encoder for the upcast helper while at it. 
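The conversion in this and the following encoder patches follows one pattern, sketched below with placeholder types: the helper-style callback that received the mode and adjusted mode as parameters becomes a native callback that takes only the encoder and reads both modes back from the CRTC's committed config.

	/* Placeholder sketch of the callback conversion; not the driver's types. */
	struct fake_mode { int clock; };
	struct fake_crtc_config { struct fake_mode requested_mode, adjusted_mode; };
	struct fake_crtc { struct fake_crtc_config config; };
	struct fake_encoder { struct fake_crtc *crtc; };

	/* before: drm helper style, modes handed in by the caller */
	static void widget_mode_set_old(struct fake_encoder *encoder,
					struct fake_mode *mode,
					struct fake_mode *adjusted_mode)
	{
		/* program hardware from adjusted_mode */
	}

	/* after: native callback, modes read from the committed crtc config */
	static void widget_mode_set(struct fake_encoder *encoder)
	{
		struct fake_mode *adjusted_mode = &encoder->crtc->config.adjusted_mode;

		/* program hardware from adjusted_mode */
	}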
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_tv.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b0b446f630f7..f2c6d7909ae2 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = { }, }; -static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) +static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_tv, base.base); + return container_of(encoder, struct intel_tv, base); } static struct intel_tv *intel_attached_tv(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_tv, - base); + return enc_to_tv(intel_attached_encoder(connector)); } static bool @@ -908,7 +906,7 @@ static bool intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { - struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base); + struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (!tv_mode) @@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder, return true; } -static void -intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_tv_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); u32 tv_ctl; u32 hctl1, hctl2, hctl3; @@ -1487,10 +1482,6 @@ out: return ret; } -static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { - .mode_set = intel_tv_mode_set, -}; - static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_tv_detect, @@ -1623,6 +1614,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->mode_set = intel_tv_mode_set; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; @@ -1644,7 +1636,6 @@ intel_tv_init(struct drm_device *dev) intel_tv->tv_format = tv_modes[initial_mode].name; - drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; -- cgit v1.2.3 From eebe6f0b3d4207a05000921a5e60b4161f89ca35 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:03 +0200 Subject: drm/i915/crt: use native encoder->mode_set callback Also drop the intel_ prefix from the local intel_crtc variable and reorder the upcast macros a bit for more reuse. 
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 0c0d4e8d768e..b5a3875f22c7 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -52,15 +52,14 @@ struct intel_crt { u32 adpa_reg; }; -static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) { - return container_of(intel_attached_encoder(connector), - struct intel_crt, base); + return container_of(encoder, struct intel_crt, base); } -static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) { - return container_of(encoder, struct intel_crt, base); + return intel_encoder_to_crt(intel_attached_encoder(connector)); } static bool intel_crt_get_hw_state(struct intel_encoder *encoder, @@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, return true; } -static void intel_crt_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_crt_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crt *crt = - intel_encoder_to_crt(to_intel_encoder(encoder)); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = encoder->base.dev; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; u32 adpa; if (HAS_PCH_SPLIT(dev)) @@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (HAS_PCH_LPT(dev)) ; /* Those bits don't exist here */ else if (HAS_PCH_CPT(dev)) - adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); - else if (intel_crtc->pipe == 0) + adpa |= PORT_TRANS_SEL_CPT(crtc->pipe); + else if (crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; else adpa |= ADPA_PIPE_B_SELECT; if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); + I915_WRITE(BCLRPAT(crtc->pipe), 0); I915_WRITE(crt->adpa_reg, adpa); } @@ -711,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector) * Routines for controlling stuff on the analog port */ -static const struct drm_encoder_helper_funcs crt_encoder_funcs = { - .mode_set = intel_crt_mode_set, -}; - static const struct drm_connector_funcs intel_crt_connector_funcs = { .reset = intel_crt_reset, .dpms = intel_crt_dpms, @@ -804,6 +796,7 @@ void intel_crt_init(struct drm_device *dev) crt->adpa_reg = ADPA; crt->base.compute_config = intel_crt_compute_config; + crt->base.mode_set = intel_crt_mode_set; crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; crt->base.get_config = intel_crt_get_config; @@ -815,7 +808,6 @@ void intel_crt_init(struct drm_device *dev) crt->base.get_hw_state = intel_crt_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; - drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); -- cgit v1.2.3 From c59423a3dd4186ed4c352537c2b572a9bb950fe9 Mon Sep 17 00:00:00 2001 
From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:04 +0200 Subject: drm/i915/hdmi: use native encoder mode_set callback Again drop the intel_ prefix from the intel_crtc local variable to save a bit of space. But here I didn't switch the upcast macros to intel_encoder since all our infoframe interfaces still use drm_encoder. That needs to be changed first. Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index af18da76c04b..c16fb34879c1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -591,14 +591,13 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_spd_infoframe(encoder); } -static void intel_hdmi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_hdmi_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; u32 hdmi_val; hdmi_val = SDVO_ENCODING_HDMI; @@ -609,7 +608,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; - if (intel_crtc->config.pipe_bpp > 24) + if (crtc->config.pipe_bpp > 24) hdmi_val |= HDMI_COLOR_FORMAT_12bpc; else hdmi_val |= SDVO_COLOR_FORMAT_8bpc; @@ -620,21 +619,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (intel_hdmi->has_audio) { DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); hdmi_val |= SDVO_AUDIO_ENABLE; hdmi_val |= HDMI_MODE_SELECT_HDMI; - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } if (HAS_PCH_CPT(dev)) - hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); + hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe); else - hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe); + hdmi_val |= SDVO_PIPE_SEL(crtc->pipe); I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); POSTING_READ(intel_hdmi->hdmi_reg); - intel_hdmi->set_infoframes(encoder, adjusted_mode); + intel_hdmi->set_infoframes(&encoder->base, adjusted_mode); } static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, @@ -1103,10 +1102,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector) kfree(connector); } -static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { - .mode_set = intel_hdmi_mode_set, -}; - static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_hdmi_detect, @@ -1229,9 +1224,9 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_encoder->compute_config = intel_hdmi_compute_config; + intel_encoder->mode_set = intel_hdmi_mode_set; intel_encoder->enable = intel_enable_hdmi; intel_encoder->disable = 
intel_disable_hdmi; intel_encoder->get_hw_state = intel_hdmi_get_hw_state; -- cgit v1.2.3 From b934223d7abae2f52d22b4734a02b9a0867eafe3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:05 +0200 Subject: drm/i915/dp: use native encoder ->mode_set callback Usual drill applies. Again I've not switched the upcast helpers to use intel_encoder instead of drm_encoder since that's much more invasive and will change also the hdmi and ddi encoders. Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d0c3f9b08387..e5eeec30c9bb 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -833,15 +833,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) udelay(500); } -static void -intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_dp_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; - struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; /* * There are four kinds of DP registers: @@ -873,7 +872,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", pipe_name(crtc->pipe)); intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } intel_dp_init_link_config(intel_dp); @@ -3035,10 +3034,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } -static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { - .mode_set = intel_dp_mode_set, -}; - static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_dp_detect, @@ -3518,9 +3513,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); intel_encoder->compute_config = intel_dp_compute_config; + intel_encoder->mode_set = intel_dp_mode_set; intel_encoder->enable = intel_enable_dp; intel_encoder->pre_enable = intel_pre_enable_dp; intel_encoder->disable = intel_disable_dp; -- cgit v1.2.3 From 66df24d926fe0686034bb8d47a0f586310602178 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:06 +0200 Subject: drm/i915/lvds: use the native encoder ->mode_set callback Does nothing, so trivial conversion. But update the outdated comment while at it. 
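Even an empty hook is worth wiring up: once every encoder provides a native ->mode_set, the crtc code can invoke the callback unconditionally, which is what lets the legacy helper fallback be deleted two patches later. The call site this enables, roughly:

    /* in intel_crtc_mode_set(), for each encoder on the crtc: */
    encoder->mode_set(encoder);    /* no NULL check, no drm helper fallback */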
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_lvds.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2110df24454b..4d33278e31fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -319,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return true; } -static void intel_lvds_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_lvds_mode_set(struct intel_encoder *encoder) { /* - * The LVDS pin pair will already have been turned on in the - * intel_crtc_mode_set since it has a large impact on the DPLL - * settings. + * We don't do anything here, the LVDS port is fully set up in the pre + * enable hook - the ordering constraints for enabling the lvds port vs. + * enabling the display pll are too strict. */ } @@ -507,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector, return 0; } -static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { - .mode_set = intel_lvds_mode_set, -}; - static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, @@ -971,6 +965,7 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; intel_encoder->compute_config = intel_lvds_compute_config; + intel_encoder->mode_set = intel_lvds_mode_set; intel_encoder->disable = intel_disable_lvds; intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; @@ -987,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev) else intel_encoder->crtc_mask = (1 << 1); - drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; -- cgit v1.2.3 From c7d8be305aa28dd809dedd401adcd4da8e4f9144 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:07 +0200 Subject: drm/i915/ddi: use the native encoder ->mode_set callback Same conversion as for hdmi/dp. 
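One wrinkle shared by the dp/hdmi/ddi conversions: helpers such as enc_to_intel_dp(), enc_to_dig_port() and intel_write_eld() still take a drm_encoder, so inside the native hook they are handed the embedded base object rather than the intel_encoder itself (sketch only; adjusted_mode comes from crtc->config as above):

    struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

    intel_write_eld(&encoder->base, adjusted_mode);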
Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 39 ++++++++++++++++----------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 931b4bb1f9dc..b361c0862373 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -281,25 +281,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI link training failed!\n"); } -static void intel_ddi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_ddi_mode_set(struct intel_encoder *encoder) { - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int port = intel_ddi_get_encoder_port(intel_encoder); - int pipe = intel_crtc->pipe; - int type = intel_encoder->type; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + int port = intel_ddi_get_encoder_port(encoder); + int pipe = crtc->pipe; + int type = encoder->type; + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); - intel_crtc->eld_vld = false; + crtc->eld_vld = false; if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); + enc_to_dig_port(&encoder->base); intel_dp->DP = intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; @@ -307,17 +304,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, if (intel_dp->has_audio) { DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); /* write eld */ DRM_DEBUG_DRIVER("DP audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } intel_dp_init_link_config(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); if (intel_hdmi->has_audio) { /* Proper support for digital audio needs a new logic @@ -325,14 +322,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, * patch bombing. 
*/ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); /* write eld */ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } - intel_hdmi->set_infoframes(encoder, adjusted_mode); + intel_hdmi->set_infoframes(&encoder->base, adjusted_mode); } } @@ -1311,10 +1308,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { .destroy = intel_ddi_destroy, }; -static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { - .mode_set = intel_ddi_mode_set, -}; - void intel_ddi_init(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1339,9 +1332,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) drm_encoder_init(dev, encoder, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); intel_encoder->compute_config = intel_ddi_compute_config; + intel_encoder->mode_set = intel_ddi_mode_set; intel_encoder->enable = intel_enable_ddi; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; -- cgit v1.2.3 From 36f2d1f151215c48d902947d64b86dc5ab277e19 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:08 +0200 Subject: drm/i915: rip out legacy encoder->mode_set callback The encoder->mode_set callback from the crtc helpers is now completely unused in our driver. Good riddance! Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e9120843a9d7..56bc2ef8203f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6220,11 +6220,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder_helper_funcs *encoder_funcs; struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; struct drm_display_mode *mode = &intel_crtc->config.requested_mode; int pipe = intel_crtc->pipe; int ret; @@ -6243,12 +6240,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, encoder->base.base.id, drm_get_encoder_name(&encoder->base), mode->base.id, mode->name); - if (encoder->mode_set) { - encoder->mode_set(encoder); - } else { - encoder_funcs = encoder->base.helper_private; - encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); - } + encoder->mode_set(encoder); } return 0; -- cgit v1.2.3 From 135c81b8c3c9a70d7b55758c9c2a247a4abb7b64 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 21 Jul 2013 21:37:09 +0200 Subject: drm/i915: clean up crtc timings computation In the old days of the crtc helpers we've only had the encoder and crtc ->mode_fixup callbacks. So when the lvds connector wanted to adjust the crtc timings it had to set a driver-private mode flag to tell the crtc mode fixup code to not overwrite them with the generic ones. When converting things to the new infrastructure I've kept the entire logic and only moved the flag to pipe_config->timings_set. 
But this logic is pretty tricky and already caused regressions: commit 21d8a4756af5fdf4a42e79a77cf3b6f52678d443 Author: Daniel Vetter Date: Fri Jul 12 08:07:30 2013 +0200 drm/i915: fix pfit regression for non-autoscaled resolutions So take advantage of the flexibility our own modeset infrastructure affords us and prefill default crtc timings. This allows us to rip out ->timings_set. Note that we overwrite things again when retrying the pipe config computation due to bandwidth constraints to avoid bogus crtc timings if the encoder only does relative adjustments (which is how the pfit code works). Only a theoretical concern though since platforms where we retry (pch-split platforms) do not need adjustments (since only the old gmch pfit needs that). But let's better be safe than sorry. Since we now initialize the crtc timings before calling the encoder->compute_config functions the crtc initialization in the gmch pfit code is now redundant and so can be removed. Cc: Jesse Barnes Cc: Mika Kuoppala Reviewed-by: Rodrigo Vivi [danvet: Add a paragraph to the commit message to explain why we can ditch the crtc timings initialization call from the gmch pfit code, to answer a question from Rodrigo's review.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 9 +++------ drivers/gpu/drm/i915/intel_drv.h | 4 ---- drivers/gpu/drm/i915/intel_panel.c | 3 --- 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56bc2ef8203f..e1e50dfb08da 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4108,12 +4108,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, return -EINVAL; } - /* All interlaced capable intel hw wants timings in frames. Note though - * that intel_lvds_compute_config does some funny tricks with the crtc - * timings, so we need to be careful not to clobber these.*/ - if (!pipe_config->timings_set) - drm_mode_set_crtcinfo(adjusted_mode, 0); - /* Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. */ @@ -8087,6 +8081,9 @@ encoder_retry: pipe_config->port_clock = 0; pipe_config->pixel_multiplier = 1; + /* Fill in default crtc timings, allow encoders to overwrite them. */ + drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); + /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d9f50e368fe9..474797be1fc2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -208,10 +208,6 @@ struct intel_crtc_config { struct drm_display_mode requested_mode; struct drm_display_mode adjusted_mode; - /* This flag must be set by the encoder's compute_config callback if it - * changes the crtc timings in the mode to prevent the crtc fixup from - * overwriting them. Currently only lvds needs that. */ - bool timings_set; /* Whether to set up the PCH/FDI. Note that we never allow sharing * between pch encoders and cpu encoders.
*/ bool has_pch_encoder; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 67e2c1f1c9a8..01b5a519c43c 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, adjusted_mode->vdisplay == mode->vdisplay) goto out; - drm_mode_set_crtcinfo(adjusted_mode, 0); - pipe_config->timings_set = true; - switch (fitting_mode) { case DRM_MODE_SCALE_CENTER: /* -- cgit v1.2.3 From 29ebf90f8157f9d01dda2b1555b4a08e9e542b21 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 27 Jul 2013 17:23:55 +0100 Subject: drm/i915: Squelch repeated reasoning for why FBC cannot be activated Almost invariably the reason why FBC cannot be turned on is the same every time (disabled via parameter, too many pipes, pipe too large etc) as modesetting and framebuffer configuration changes less frequently than trying to enable FBC. Signed-off-by: Chris Wilson Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 6 ++++ drivers/gpu/drm/i915/i915_drv.h | 4 ++- drivers/gpu/drm/i915/intel_pm.c | 59 +++++++++++++++++++++++-------------- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ed72fe08217c..eed2f4ca9a76 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1099,6 +1099,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } else { seq_puts(m, "FBC disabled: "); switch (dev_priv->fbc.no_fbc_reason) { + case FBC_OK: + seq_puts(m, "FBC actived, but currently disabled in hardware"); + break; + case FBC_UNSUPPORTED: + seq_puts(m, "unsupported by this chipset"); + break; case FBC_NO_OUTPUT: seq_puts(m, "no outputs"); break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 82ea281b2182..ace691b23072 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -589,7 +589,9 @@ struct i915_fbc { int interval; } *fbc_work; - enum { + enum no_fbc_reason { + FBC_OK, /* FBC is enabled */ + FBC_UNSUPPORTED, /* FBC is not supported by this chipset */ FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a5ba92a4b12..b503f594205e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -421,6 +421,16 @@ void intel_disable_fbc(struct drm_device *dev) dev_priv->fbc.plane = -1; } +static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, + enum no_fbc_reason reason) +{ + if (dev_priv->fbc.no_fbc_reason == reason) + return false; + + dev_priv->fbc.no_fbc_reason = reason; + return true; +} + /** * intel_update_fbc - enable/disable FBC as needed * @dev: the drm_device @@ -450,11 +460,16 @@ void intel_update_fbc(struct drm_device *dev) struct drm_i915_gem_object *obj; unsigned int max_hdisplay, max_vdisplay; - if (!i915_powersave) + if (!I915_HAS_FBC(dev)) { + set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); return; + } - if (!I915_HAS_FBC(dev)) + if (!i915_powersave) { + if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) + DRM_DEBUG_KMS("fbc disabled per module param\n"); return; + } /* * If FBC is already on, we just have to verify that we can @@ -469,9 +484,8 @@ void intel_update_fbc(struct drm_device *dev) if 
(intel_crtc_active(tmp_crtc) && !to_intel_crtc(tmp_crtc)->primary_disabled) { if (crtc) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->fbc.no_fbc_reason = - FBC_MULTIPLE_PIPES; + if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); goto out_disable; } crtc = tmp_crtc; @@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev) } if (!crtc || crtc->fb == NULL) { - DRM_DEBUG_KMS("no output, disabling\n"); - dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT; + if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) + DRM_DEBUG_KMS("no output, disabling\n"); goto out_disable; } @@ -491,20 +505,20 @@ void intel_update_fbc(struct drm_device *dev) if (i915_enable_fbc < 0 && INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { - DRM_DEBUG_KMS("disabled per chip default\n"); - dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT; + if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) + DRM_DEBUG_KMS("disabled per chip default\n"); goto out_disable; } if (!i915_enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param\n"); - dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM; + if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) + DRM_DEBUG_KMS("fbc disabled per module param\n"); goto out_disable; } if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); - dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE; + if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); goto out_disable; } @@ -517,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev) } if ((crtc->mode.hdisplay > max_hdisplay) || (crtc->mode.vdisplay > max_vdisplay)) { - DRM_DEBUG_KMS("mode too large for compression, disabling\n"); - dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE; + if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) + DRM_DEBUG_KMS("mode too large for compression, disabling\n"); goto out_disable; } if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && intel_crtc->plane != 0) { - DRM_DEBUG_KMS("plane not 0, disabling compression\n"); - dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE; + if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); goto out_disable; } @@ -533,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev) */ if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { - DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); - dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED; + if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED)) + DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); goto out_disable; } @@ -543,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev) goto out_disable; if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { - DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); - dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL; + if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) + DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); goto out_disable; } @@ -587,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev) } intel_enable_fbc(crtc, 500); + dev_priv->fbc.no_fbc_reason = FBC_OK; return; out_disable: -- cgit v1.2.3 From 08c45263a62af33348e674765710cb49dd3959e0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2013 19:04:37 +0100 Subject: 
drm/i915: Use the same pte_encoding for ppgtt as for gtt The PTE layouts are the same for both ppgtt and gtt, so we can simplify the setup for ppgtt by copying the encoding function pointer from gtt. This prevents bugs where we update one function pointer, but forget the other. For instance, commit 4d15c145a6234d999c0452eec0d275c1fbf0688c Author: Ben Widawsky Date: Thu Jul 4 11:02:06 2013 -0700 drm/i915: Use eLLC/LLC by default when available only extends the gtt to use eLLC/LLC caching and forgets to also update the ppgtt function pointer. v2: Actually mention the bug being fixed (Kenneth) Signed-off-by: Chris Wilson Reviewed-by: Kenneth Graunke Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3b639a94dddf..e7b420495516 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -298,13 +298,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) * now. */ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); - if (IS_HASWELL(dev)) { - ppgtt->base.pte_encode = hsw_pte_encode; - } else if (IS_VALLEYVIEW(dev)) { - ppgtt->base.pte_encode = byt_pte_encode; - } else { - ppgtt->base.pte_encode = gen6_pte_encode; - } + ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; ppgtt->enable = gen6_ppgtt_enable; ppgtt->base.clear_range = gen6_ppgtt_clear_range; -- cgit v1.2.3 From b3ae96a8ea1cbd0970459b6efd7ea7550fe033c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Marchesin?= Date: Wed, 31 Jul 2013 00:11:07 -0700 Subject: drm/i915: Remove useless define MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Marchesin Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e1e50dfb08da..d82e225294a6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -59,7 +59,6 @@ typedef struct { int p2_slow, p2_fast; } intel_p2_t; -#define INTEL_P2_NUM 2 typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; -- cgit v1.2.3 From 2c1792a10b10e41dcf34c97304fb8f75e52e7112 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 1 Aug 2013 18:39:55 +0100 Subject: drm/i915: Tidy the macro casting by using an inline function Some of our macros try to convert from a drm_device to a drm_i915_private and then use the pointer inline. This is not only cumbersome but prone to error. Replacing it with a typesafe function should help catch those errors in future. Signed-off-by: Chris Wilson Reviewed-by: Paulo Zanoni Signed-off-by: Ben Widawsky [danvet: Squash in fixup to correctly order static vs. inline qualifiers, static comes first. Also fix up another offender.]
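The type-safety gain is easy to see: the old macro only requires its argument to have a dev_private member, so a wrong pointer type can slip through silently, whereas the inline function makes the compiler check that it really is a drm_device (both forms are taken from the hunk below):

    /* before: an unchecked cast buried inside the macro
     * #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
     */

    /* after: the argument type is enforced at compile time */
    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
            return dev->dev_private;
    }

    #define INTEL_INFO(dev) (to_i915(dev)->info)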
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ace691b23072..0b7583cafa3f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1223,6 +1223,11 @@ typedef struct drm_i915_private { struct i915_ums_state ums; } drm_i915_private_t; +static inline struct drm_i915_private *to_i915(const struct drm_device *dev) +{ + return dev->dev_private; +} + /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ @@ -1484,7 +1489,7 @@ struct drm_i915_file_private { struct i915_ctx_hang_stats hang_stats; }; -#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) +#define INTEL_INFO(dev) (to_i915(dev)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) @@ -1578,7 +1583,7 @@ struct drm_i915_file_private { #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 -#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) @@ -1975,7 +1980,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); /* i915_gem_tiling.c */ -inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) +static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = obj->base.dev->dev_private; -- cgit v1.2.3 From bdd57d0386d892e5c470a3d615c3034389700964 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:13 +0300 Subject: drm/i915: Add scaled paramater to update_sprite_watermarks() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For calculating watermarks we want to know whether sprites are scaled. Pass that information to update_sprite_watermarks() so that eventually we may do some watermark pre-computing. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_drv.h | 7 ++++--- drivers/gpu/drm/i915/intel_pm.c | 13 +++++++------ drivers/gpu/drm/i915/intel_sprite.c | 11 +++++++---- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0b7583cafa3f..67a15d00d5f2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -361,7 +361,7 @@ struct drm_i915_display_funcs { void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable); + bool enable, bool scaled); void (*modeset_global_resources)(struct drm_device *dev); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
*/ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 474797be1fc2..ed33976c194b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -349,7 +349,8 @@ struct intel_plane { * for the watermark calculations. Currently only Haswell uses this. */ struct { - bool enable; + bool enabled; + bool scaled; uint8_t bytes_per_pixel; uint32_t horiz_pixels; } wm; @@ -770,8 +771,8 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port); /* For use by IVB LP watermark workaround in intel_sprite.c */ extern void intel_update_watermarks(struct drm_device *dev); extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, - uint32_t sprite_width, - int pixel_size, bool enable); + uint32_t sprite_width, int pixel_size, + bool enabled, bool scaled); extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, unsigned int tiling_mode, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b503f594205e..f9813b3ead34 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2403,7 +2403,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, pipe = intel_plane->pipe; p = ¶ms[pipe]; - p->sprite_enabled = intel_plane->wm.enable; + p->sprite_enabled = intel_plane->wm.enabled; p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel; p->spr_horiz_pixels = intel_plane->wm.horiz_pixels; @@ -2631,7 +2631,7 @@ static void haswell_update_wm(struct drm_device *dev) static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable) + bool enabled, bool scaled) { struct drm_plane *plane; @@ -2639,7 +2639,8 @@ static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, struct intel_plane *intel_plane = to_intel_plane(plane); if (intel_plane->pipe == pipe) { - intel_plane->wm.enable = enable; + intel_plane->wm.enabled = enabled; + intel_plane->wm.scaled = scaled; intel_plane->wm.horiz_pixels = sprite_width + 1; intel_plane->wm.bytes_per_pixel = pixel_size; break; @@ -2727,7 +2728,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable) + bool enable, bool scaled) { struct drm_i915_private *dev_priv = dev->dev_private; int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ @@ -2850,13 +2851,13 @@ void intel_update_watermarks(struct drm_device *dev) void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable) + bool enable, bool scaled) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->display.update_sprite_wm) dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, - pixel_size, enable); + pixel_size, enable, scaled); } static struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 55bdf70b548b..069155f17edb 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -114,7 +114,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | 
crtc_x); @@ -268,7 +269,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); /* * IVB workaround: must disable low power watermarks for at least @@ -336,7 +338,7 @@ ivb_disable_plane(struct drm_plane *plane) dev_priv->sprite_scaling_enabled &= ~(1 << pipe); - intel_update_sprite_watermarks(dev, pipe, 0, 0, false); + intel_update_sprite_watermarks(dev, pipe, 0, 0, false, false); /* potentially re-enable LP watermarks */ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) @@ -456,7 +458,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); dvsscale = 0; if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) -- cgit v1.2.3 From 67ca28f30af8e7555f40b916c28148b432168eec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:14 +0300 Subject: drm/i915: Pass the actual sprite width to watermarks functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't subtract one from the sprite width before watermark calculations. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_sprite.c | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f9813b3ead34..9ef476b2eeeb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2641,7 +2641,7 @@ static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, if (intel_plane->pipe == pipe) { intel_plane->wm.enabled = enabled; intel_plane->wm.scaled = scaled; - intel_plane->wm.horiz_pixels = sprite_width + 1; + intel_plane->wm.horiz_pixels = sprite_width; intel_plane->wm.bytes_per_pixel = pixel_size; break; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 069155f17edb..3e3a6d01cff6 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -108,15 +108,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, sprctl |= SP_ENABLE; + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, - src_w != crtc_w || src_h != crtc_h); - I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); @@ -263,15 +263,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (IS_HASWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, - src_w != crtc_w || src_h != crtc_h); - /* * IVB workaround: must disable low power watermarks for at least * one frame before enabling scaling. 
LP watermarks can be re-enabled @@ -452,15 +452,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, - src_w != crtc_w || src_h != crtc_h); - dvsscale = 0; if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; -- cgit v1.2.3 From ec4c4aa14720b284af8eadd2d65d5131519fc29f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:15 +0300 Subject: drm/i915: Calculate the sprite WM based on the source width instead of the destination width MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using the destination width in the sprite WM calculations isn't correct. We should be using the source width. Note: This doesn't affect hsw since it does not support sprite scaling. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni [danvet: Add review note from Paulo to the commit message.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 3e3a6d01cff6..5a36afb6ea03 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -108,7 +108,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, sprctl |= SP_ENABLE; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -263,7 +263,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (IS_HASWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -452,7 +452,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true, + intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ -- cgit v1.2.3 From 3658729a72b19f5e1cb92bd972939a13db970168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:16 +0300 Subject: drm/i915: Rename hsw_wm_get_pixel_rate to ilk_pipe_pixel_rate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hsw_wm_get_pixel_rate() isn't specific to HSW. In fact it should be made to handle all gens, but for now it depends on the PCH panel fitter state, so give it an ilk_ prefix. 
Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9ef476b2eeeb..8505e74e3645 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2095,8 +2095,8 @@ static void ivybridge_update_wm(struct drm_device *dev) cursor_wm); } -static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev, - struct drm_crtc *crtc) +static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, + struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pixel_rate, pfit_size; @@ -2388,7 +2388,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, pipes_active++; p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; - p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc); + p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8; p->cur_bytes_per_pixel = 4; p->pri_horiz_pixels = -- cgit v1.2.3 From 23297044ac70da5c87b1c1ef7d5cf32c84b2fd00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:17 +0300 Subject: drm/i915: Rename most wm compute functions to ilk_ prefix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions are appropriate for everything since ILK. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8505e74e3645..6259072a288c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2126,7 +2126,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, return pixel_rate; } -static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, +static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, uint32_t latency) { uint64_t ret; @@ -2137,7 +2137,7 @@ static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, return ret; } -static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, +static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, uint32_t horiz_pixels, uint8_t bytes_per_pixel, uint32_t latency) { @@ -2149,7 +2149,7 @@ static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, return ret; } -static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, +static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, uint8_t bytes_per_pixel) { return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; @@ -2198,7 +2198,7 @@ enum hsw_data_buf_partitioning { }; /* For both WM_PIPE and WM_LP. 
*/ -static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params, +static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value, bool is_lp) { @@ -2208,14 +2208,14 @@ static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params, if (!params->active) return 0; - method1 = hsw_wm_method1(params->pixel_rate, + method1 = ilk_wm_method1(params->pixel_rate, params->pri_bytes_per_pixel, mem_value); if (!is_lp) return method1; - method2 = hsw_wm_method2(params->pixel_rate, + method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->pri_horiz_pixels, params->pri_bytes_per_pixel, @@ -2225,7 +2225,7 @@ static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params, } /* For both WM_PIPE and WM_LP. */ -static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params, +static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value) { uint32_t method1, method2; @@ -2233,10 +2233,10 @@ static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params, if (!params->active || !params->sprite_enabled) return 0; - method1 = hsw_wm_method1(params->pixel_rate, + method1 = ilk_wm_method1(params->pixel_rate, params->spr_bytes_per_pixel, mem_value); - method2 = hsw_wm_method2(params->pixel_rate, + method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->spr_horiz_pixels, params->spr_bytes_per_pixel, @@ -2245,13 +2245,13 @@ static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params, } /* For both WM_PIPE and WM_LP. */ -static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params, +static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value) { if (!params->active) return 0; - return hsw_wm_method2(params->pixel_rate, + return ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->cur_horiz_pixels, params->cur_bytes_per_pixel, @@ -2259,14 +2259,14 @@ static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params, } /* Only for WM_LP. 
*/ -static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, +static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, uint32_t pri_val, uint32_t mem_value) { if (!params->active) return 0; - return hsw_wm_fbc(pri_val, + return ilk_wm_fbc(pri_val, params->pri_horiz_pixels, params->pri_bytes_per_pixel); } @@ -2281,10 +2281,10 @@ static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { struct hsw_pipe_wm_parameters *p = ¶ms[pipe]; - pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true); - spr_val[pipe] = hsw_compute_spr_wm(p, mem_value); - cur_val[pipe] = hsw_compute_cur_wm(p, mem_value); - fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value); + pri_val[pipe] = ilk_compute_pri_wm(p, mem_value, true); + spr_val[pipe] = ilk_compute_spr_wm(p, mem_value); + cur_val[pipe] = ilk_compute_cur_wm(p, mem_value); + fbc_val[pipe] = ilk_compute_fbc_wm(p, pri_val[pipe], mem_value); } result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]); @@ -2311,9 +2311,9 @@ static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv, { uint32_t pri_val, cur_val, spr_val; - pri_val = hsw_compute_pri_wm(params, mem_value, false); - spr_val = hsw_compute_spr_wm(params, mem_value); - cur_val = hsw_compute_cur_wm(params, mem_value); + pri_val = ilk_compute_pri_wm(params, mem_value, false); + spr_val = ilk_compute_spr_wm(params, mem_value); + cur_val = ilk_compute_cur_wm(params, mem_value); WARN(pri_val > 127, "Primary WM error, mode not supported for pipe %c\n", -- cgit v1.2.3 From 1fda9882ca0ba134134c5bf04b8d4f4f06b52649 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:19 +0300 Subject: drm/i915: Don't pass "mem_value" to ilk_compute_fbc_wm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The FBC watermark doesn't depend on the latency value, so no point in passing it in. Note: It actually depends upon the latency, but only through priv_val ... Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni [danvet: Add review comment from Paulo to the commit message.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6259072a288c..1a80787f4258 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2260,8 +2260,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, /* Only for WM_LP. 
*/ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, - uint32_t pri_val, - uint32_t mem_value) + uint32_t pri_val) { if (!params->active) return 0; @@ -2284,7 +2283,7 @@ static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, pri_val[pipe] = ilk_compute_pri_wm(p, mem_value, true); spr_val[pipe] = ilk_compute_spr_wm(p, mem_value); cur_val[pipe] = ilk_compute_cur_wm(p, mem_value); - fbc_val[pipe] = ilk_compute_fbc_wm(p, pri_val[pipe], mem_value); + fbc_val[pipe] = ilk_compute_fbc_wm(p, pri_val[pipe]); } result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]); -- cgit v1.2.3 From 888fd1594e38c21f8dc5aa28b90a556df32f61e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:20 +0300 Subject: drm/i915: Change the watermark latency type to uint16_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The latency values fit in uint16_t, so let's save a few bytes. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1a80787f4258..faa4ef6c33c4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2353,7 +2353,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) static void hsw_compute_wm_parameters(struct drm_device *dev, struct hsw_pipe_wm_parameters *params, - uint32_t *wm, + uint16_t *wm, struct hsw_wm_maximums *lp_max_1_2, struct hsw_wm_maximums *lp_max_5_6) { @@ -2426,7 +2426,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, static void hsw_compute_wm_results(struct drm_device *dev, struct hsw_pipe_wm_parameters *params, - uint32_t *wm, + uint16_t *wm, struct hsw_wm_maximums *lp_maximums, struct hsw_wm_values *results) { @@ -2608,7 +2608,7 @@ static void haswell_update_wm(struct drm_device *dev) struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; struct hsw_pipe_wm_parameters params[3]; struct hsw_wm_values results_1_2, results_5_6, *best_results; - uint32_t wm[5]; + uint16_t wm[5]; enum hsw_data_buf_partitioning partitioning; hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6); -- cgit v1.2.3 From 12b134df4e42ea1ac141388e563346777f8a1605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:21 +0300 Subject: drm/i915: Split out reading of HSW watermark latency values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move parsing of MCH_SSKPD to a separate function, we'll add other platforms there later. Note: Chris spotted an empty struct initializer and wondered whether that is hiding a compilier warning. Ville explained that it should have been part of the patch that extends this function to snb/ivb, which don't have all levels hsw has. I've figured it's ok to keep it here with a small note. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni [danvet: Add note about the ominous struct initializer.] 
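The empty struct initializer being discussed is the zero-initialization of the latency array at the call site. It matters because, once SNB/IVB support lands in a follow-up patch, those platforms fill in fewer levels than Haswell, and the untouched entries must read as 0 rather than stack garbage (both lines appear in the hunks below):

    uint16_t wm[5] = {};              /* every level starts out as 0 */

    intel_read_wm_latency(dev, wm);   /* fills only the levels the hw reports */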
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index faa4ef6c33c4..b8ec1433531d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2351,28 +2351,33 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) PIPE_WM_LINETIME_TIME(linetime); } +static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_HASWELL(dev)) { + uint64_t sskpd = I915_READ64(MCH_SSKPD); + + wm[0] = (sskpd >> 56) & 0xFF; + if (wm[0] == 0) + wm[0] = sskpd & 0xF; + wm[1] = ((sskpd >> 4) & 0xFF) * 5; + wm[2] = ((sskpd >> 12) & 0xFF) * 5; + wm[3] = ((sskpd >> 20) & 0x1FF) * 5; + wm[4] = ((sskpd >> 32) & 0x1FF) * 5; + } +} + static void hsw_compute_wm_parameters(struct drm_device *dev, struct hsw_pipe_wm_parameters *params, - uint16_t *wm, struct hsw_wm_maximums *lp_max_1_2, struct hsw_wm_maximums *lp_max_5_6) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; struct drm_plane *plane; - uint64_t sskpd = I915_READ64(MCH_SSKPD); enum pipe pipe; int pipes_active = 0, sprites_enabled = 0; - if ((sskpd >> 56) & 0xFF) - wm[0] = (sskpd >> 56) & 0xFF; - else - wm[0] = sskpd & 0xF; - wm[1] = ((sskpd >> 4) & 0xFF) * 5; - wm[2] = ((sskpd >> 12) & 0xFF) * 5; - wm[3] = ((sskpd >> 20) & 0x1FF) * 5; - wm[4] = ((sskpd >> 32) & 0x1FF) * 5; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct hsw_pipe_wm_parameters *p; @@ -2608,10 +2613,11 @@ static void haswell_update_wm(struct drm_device *dev) struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; struct hsw_pipe_wm_parameters params[3]; struct hsw_wm_values results_1_2, results_5_6, *best_results; - uint16_t wm[5]; + uint16_t wm[5] = {}; enum hsw_data_buf_partitioning partitioning; - hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6); + intel_read_wm_latency(dev, wm); + hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2); if (lp_max_1_2.pri != lp_max_5_6.pri) { -- cgit v1.2.3 From e5d5019e95415a99b1c0bca3dab6d8fcd39f4c65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:22 +0300 Subject: drm/i915: Don't multiply the watermark latency values too early MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LP1+ watermark latency values need to be multiplied by 5 to make them suitable for watermark calculations. However on pre-HSW platforms we're going to need the raw value later when we have to write it to the WM_LPn registers' latency field. So delay the multiplication until it's needed. Note: Paulo complains that the units of wm (now in 100ns) aren't really clear and I agree. But that can be fixed later on ... Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni [danvet: Add a comment about the unit obfuscation.] 
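Concretely, the raw MCH_SSKPD field now stays in wm[] and the scaling happens only at the point of use, keeping the unscaled value around for writing into the WM_LPn latency fields on pre-HSW hardware later on. Both sides, simplified from the hunks below:

    /* read side: store the raw hardware field */
    wm[1] = (sskpd >> 4) & 0xFF;

    /* use side: scale only where the LP watermark math needs it */
    if (!hsw_compute_lp_wm(wm[level] * 5, lp_maximums, params,
                           &lp_results[level - 1]))
            break;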
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b8ec1433531d..b6430bacc7dc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2361,10 +2361,10 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) wm[0] = (sskpd >> 56) & 0xFF; if (wm[0] == 0) wm[0] = sskpd & 0xF; - wm[1] = ((sskpd >> 4) & 0xFF) * 5; - wm[2] = ((sskpd >> 12) & 0xFF) * 5; - wm[3] = ((sskpd >> 20) & 0x1FF) * 5; - wm[4] = ((sskpd >> 32) & 0x1FF) * 5; + wm[1] = (sskpd >> 4) & 0xFF; + wm[2] = (sskpd >> 12) & 0xFF; + wm[3] = (sskpd >> 20) & 0x1FF; + wm[4] = (sskpd >> 32) & 0x1FF; } } @@ -2442,7 +2442,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, int level, max_level, wm_lp; for (level = 1; level <= 4; level++) - if (!hsw_compute_lp_wm(wm[level], lp_maximums, params, + if (!hsw_compute_lp_wm(wm[level] * 5, lp_maximums, params, &lp_results[level - 1])) break; max_level = level - 1; -- cgit v1.2.3 From 63cf9a131ee60fa2458d75f5c0d7a3a5dcaa2b3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 5 Jul 2013 11:57:23 +0300 Subject: drm/i915: Add SNB/IVB support to intel_read_wm_latency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SNB and IVB have a slightly different way to read out the watermark latency values. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b6430bacc7dc..da1b64121611 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2365,6 +2365,13 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) wm[2] = (sskpd >> 12) & 0xFF; wm[3] = (sskpd >> 20) & 0x1FF; wm[4] = (sskpd >> 32) & 0x1FF; + } else if (INTEL_INFO(dev)->gen >= 6) { + uint32_t sskpd = I915_READ(MCH_SSKPD); + wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; + wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; + wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; + wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; } } -- cgit v1.2.3 From b6dfdc9b7f0d7859ea146b6c869aa2cfe6d713f3 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 25 Jul 2013 10:06:50 -0700 Subject: drm/i915: enable IPS for bpp <= 24 Art confirms that this should work fine. Since most panels are 18bpp with dithering from 24bpp, the existing code wouldn't be enabled in most cases.
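A trivial standalone illustration of why the relaxed check matters (the bpp values here are example inputs, not measurements): an 18 bpp panel never satisfied the old equality test, but does pass the new upper bound, while deeper-than-24 bpp configurations remain excluded.

#include <stdbool.h>
#include <stdio.h>

static bool ips_ok_old(int pipe_bpp) { return pipe_bpp == 24; }
static bool ips_ok_new(int pipe_bpp) { return pipe_bpp <= 24; }

int main(void)
{
    int bpp[3] = { 18, 24, 30 };   /* example pipe_bpp values */
    for (int i = 0; i < 3; i++)
        printf("bpp=%d old=%d new=%d\n",
               bpp[i], ips_ok_old(bpp[i]), ips_ok_new(bpp[i]));
    return 0;
}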
Signed-off-by: Jesse Barnes Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d82e225294a6..dc7ad2a4a14d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4091,7 +4091,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, { pipe_config->ips_enabled = i915_enable_ips && hsw_crtc_supports_ips(crtc) && - pipe_config->pipe_bpp == 24; + pipe_config->pipe_bpp <= 24; } static int intel_crtc_compute_config(struct intel_crtc *crtc, -- cgit v1.2.3 From 0980a60fba7a9afa3259390e8af16b6ce486858a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 26 Jul 2013 19:57:35 +0100 Subject: drm/i915: Acquire dpio_lock for VLV sideband programming in DP/HDMI Otherwise we get flooded by the kernel warning us that we are doing long sequences of IO without serialisation. For example, WARNING: CPU: 0 PID: 11136 at drivers/gpu/drm/i915/intel_sideband.c:40 vlv_sideband_rw+0x48/0x1ef() Modules linked in: CPU: 0 PID: 11136 Comm: kworker/u2:0 Tainted: G W 3.11.0-rc2+ #4 Call Trace: [] ? warn_slowpath_common+0x63/0x78 [] ? vlv_sideband_rw+0x48/0x1ef [] ? warn_slowpath_null+0xf/0x13 [] ? vlv_sideband_rw+0x48/0x1ef [] ? vlv_dpio_write+0x1c/0x21 [] ? intel_dp_set_signal_levels+0x24a/0x385 [] ? intel_dp_complete_link_train+0x25/0x1d1 [] ? intel_dp_check_link_status+0xf7/0x106 [] ? i915_hotplug_work_func+0x17b/0x221 [] ? process_one_work+0x12e/0x210 [] ? worker_thread+0x116/0x1ad [] ? rescuer_thread+0x1cb/0x1cb [] ? kthread+0x67/0x6c [] ? ret_from_kernel_thread+0x1b/0x30 [] ? init_completion+0x18/0x18 v2: Retire the locking in vlv_crtc_enable() and do it close to the meat. Signed-off-by: Chris Wilson Reviewed-by: Jani Nikula [danvet: Squash in a s/mutex_lock/mutex_unlock/ fixup spotted by the 0 day kernel build/coccinelle and reported by Dan Carpenter.] 
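The shape of the fix is the same at every call site in the hunks that follow: take dev_priv->dpio_lock before the first sideband access of a sequence and drop it after the last one, so two paths cannot interleave their register bursts. Below is a rough, self-contained userspace analogue of that pattern; the mutex, the stub write routine, and the register offsets are all invented for illustration and are not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sideband_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub standing in for a sideband register write. */
static void sideband_write(unsigned int reg, unsigned int val)
{
    printf("write reg 0x%04x = 0x%08x\n", reg, val);
}

/* Serialise the whole multi-register sequence under one lock rather
 * than letting two threads interleave their sequences. */
static void program_lane(unsigned int port)
{
    pthread_mutex_lock(&sideband_lock);
    sideband_write(0x8200 + port, 0x00000000);
    sideband_write(0x8204 + port, 0x00760018);
    sideband_write(0x8208 + port, 0x00400888);
    pthread_mutex_unlock(&sideband_lock);
}

int main(void)
{
    program_lane(0);
    return 0;
}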
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ---- drivers/gpu/drm/i915/intel_dp.c | 6 ++++++ drivers/gpu/drm/i915/intel_hdmi.c | 4 ++++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dc7ad2a4a14d..9c59b42ee544 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3652,8 +3652,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - mutex_lock(&dev_priv->dpio_lock); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); @@ -3678,8 +3676,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, true); intel_update_fbc(dev); - - mutex_unlock(&dev_priv->dpio_lock); } static void i9xx_crtc_enable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e5eeec30c9bb..d96360d5d031 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1727,6 +1727,7 @@ static void intel_pre_enable_dp(struct intel_encoder *encoder) int pipe = intel_crtc->pipe; u32 val; + mutex_lock(&dev_priv->dpio_lock); val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); val = 0; if (pipe) @@ -1740,6 +1741,7 @@ static void intel_pre_enable_dp(struct intel_encoder *encoder) 0x00760018); vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); + mutex_unlock(&dev_priv->dpio_lock); } } @@ -1754,6 +1756,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) return; /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); @@ -1767,6 +1770,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); + mutex_unlock(&dev_priv->dpio_lock); } /* @@ -1978,6 +1982,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) return 0; } + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), @@ -1986,6 +1991,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); + mutex_unlock(&dev_priv->dpio_lock); return 0; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index c16fb34879c1..847ff20cc9a0 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1019,6 +1019,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) return; /* Enable clock channels for this port */ + mutex_lock(&dev_priv->dpio_lock); val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); val = 0; if (pipe) @@ -1049,6 +1050,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) 0x00760018); vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); + mutex_unlock(&dev_priv->dpio_lock); } static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) @@ -1062,6 +1064,7 
@@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) return; /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); @@ -1080,6 +1083,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) 0x00002000); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), DPIO_TX_OCALINIT_EN); + mutex_unlock(&dev_priv->dpio_lock); } static void intel_hdmi_post_disable(struct intel_encoder *encoder) -- cgit v1.2.3 From ab1f90f9662482021fddd0e7868005401f62866f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 30 Jul 2013 12:20:30 +0300 Subject: drm/i915: rearrange vlv dp enable and pre_enable callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VLV wants encoder enabling before the pipe is up. This is currently achieved through calling the ->enable callback early, right after the ->pre_enable callback, in valleyview_crtc_enable(). This loses both the distinction between ->pre_enable and ->enable on VLV and the possibility to use a hook at the end of the modeset sequence. Rearrange the DP callbacks to make it possible to move ->enable call later. Basically do everything in ->pre_enable on VLV, and make ->enable a NOP. There should be no functional changes. v2: Rebase. v3: Explain why this is needed in the commit message (Chris). Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 73 ++++++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d96360d5d031..63b6722d4285 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1700,49 +1700,50 @@ static void intel_enable_dp(struct intel_encoder *encoder) intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); +} - if (IS_VALLEYVIEW(dev)) { - struct intel_digital_port *dport = - enc_to_dig_port(&encoder->base); - int channel = vlv_dport_to_channel(dport); - - vlv_wait_port_ready(dev_priv, channel); - } +static void vlv_enable_dp(struct intel_encoder *encoder) +{ } static void intel_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + + if (dport->port == PORT_A) + ironlake_edp_pll_on(intel_dp); +} + +static void vlv_pre_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + int port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + u32 val; - if (dport->port == PORT_A && !IS_VALLEYVIEW(dev)) - ironlake_edp_pll_on(intel_dp); + mutex_lock(&dev_priv->dpio_lock); - if (IS_VALLEYVIEW(dev)) { - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - int port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - - mutex_lock(&dev_priv->dpio_lock); - val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, 
DPIO_DATA_CHANNEL(port), val); + val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); + val = 0; + if (pipe) + val |= (1<<21); + else + val &= ~(1<<21); + val |= 0x001000c4; + vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); + vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018); + vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), - 0x00760018); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), - 0x00400888); - mutex_unlock(&dev_priv->dpio_lock); - } + mutex_unlock(&dev_priv->dpio_lock); + + intel_enable_dp(encoder); + + vlv_wait_port_ready(dev_priv, port); } static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) @@ -3522,14 +3523,18 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->mode_set = intel_dp_mode_set; - intel_encoder->enable = intel_enable_dp; - intel_encoder->pre_enable = intel_pre_enable_dp; intel_encoder->disable = intel_disable_dp; intel_encoder->post_disable = intel_post_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev)) { intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; + intel_encoder->pre_enable = vlv_pre_enable_dp; + intel_encoder->enable = vlv_enable_dp; + } else { + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->enable = intel_enable_dp; + } intel_dig_port->port = port; intel_dig_port->dp.output_reg = output_reg; -- cgit v1.2.3 From b76cf76bfa76246c8acce104de8f2fdd001069fb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 30 Jul 2013 12:20:31 +0300 Subject: drm/i915: rearrange vlv hdmi enable and pre_enable callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VLV wants encoder enabling before the pipe is up. This is currently achieved through calling the ->enable callback early, right after the ->pre_enable callback, in valleyview_crtc_enable(). This loses both the distinction between ->pre_enable and ->enable on VLV and the possibility to use a hook at the end of the modeset sequence. Rearrange the HDMI callbacks to make it possible to move ->enable call later. Basically do everything in ->pre_enable on VLV, and make ->enable a NOP. There should be no functional changes. v2: Rebase. v3: Explain why this is needed in the commit message (Chris). 
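As with DP in the previous patch, the net effect is a reshuffle of the encoder hooks: on VLV all the work (PHY programming, enabling the port, waiting for it to be ready) moves into ->pre_enable, and ->enable becomes an empty stub, so the crtc code can later call ->enable at the end of the sequence like on other platforms. A simplified, self-contained sketch of that callback arrangement; the types and helper here are invented for illustration, not the driver's structures.

#include <stdio.h>

struct encoder {
    void (*pre_enable)(struct encoder *enc);
    void (*enable)(struct encoder *enc);
};

static void port_program_and_enable(struct encoder *enc)
{
    (void)enc;
    printf("program PHY, enable port, wait for port ready\n");
}

/* Default split: light the port in ->enable, after the pipe is up. */
static void default_pre_enable(struct encoder *enc) { (void)enc; }
static void default_enable(struct encoder *enc) { port_program_and_enable(enc); }

/* VLV variant: do everything before the pipe is up, ->enable is a stub. */
static void vlv_pre_enable(struct encoder *enc) { port_program_and_enable(enc); }
static void vlv_enable(struct encoder *enc) { (void)enc; }

static void crtc_enable(struct encoder *enc)
{
    enc->pre_enable(enc);
    printf("enable pipe and plane\n");
    enc->enable(enc);   /* can now always run at the end of the sequence */
}

int main(void)
{
    struct encoder vlv = { vlv_pre_enable, vlv_enable };
    struct encoder def = { default_pre_enable, default_enable };

    crtc_enable(&vlv);
    crtc_enable(&def);
    return 0;
}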
Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 847ff20cc9a0..5cad59fd6bd2 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -718,14 +718,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); } +} - if (IS_VALLEYVIEW(dev)) { - struct intel_digital_port *dport = - enc_to_dig_port(&encoder->base); - int channel = vlv_dport_to_channel(dport); - - vlv_wait_port_ready(dev_priv, channel); - } +static void vlv_enable_hdmi(struct intel_encoder *encoder) +{ } static void intel_disable_hdmi(struct intel_encoder *encoder) @@ -1051,6 +1047,10 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); mutex_unlock(&dev_priv->dpio_lock); + + intel_enable_hdmi(encoder); + + vlv_wait_port_ready(dev_priv, port); } static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) @@ -1231,14 +1231,16 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->compute_config = intel_hdmi_compute_config; intel_encoder->mode_set = intel_hdmi_mode_set; - intel_encoder->enable = intel_enable_hdmi; intel_encoder->disable = intel_disable_hdmi; intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_VALLEYVIEW(dev)) { - intel_encoder->pre_enable = intel_hdmi_pre_enable; intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; + intel_encoder->pre_enable = intel_hdmi_pre_enable; + intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = intel_hdmi_post_disable; + } else { + intel_encoder->enable = intel_enable_hdmi; } intel_encoder->type = INTEL_OUTPUT_HDMI; -- cgit v1.2.3 From 5004945f1d6c0282c0288afa89ad85d7f2bea4d5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 30 Jul 2013 12:20:32 +0300 Subject: drm/i915: move encoder->enable callback later in VLV crtc enable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VLV wants encoder enabling before the pipe is up. With the previously rearranged VLV DP and HDMI ->pre_enable and ->enable callbacks in place, this no longer depends on the early ->enable hook call. Move the ->enable call at the end of the sequence, similar to the crtc enable on other platforms. This will be needed e.g. for moving the eDP backlight enabling to the right place in the sequence, currently done too early on VLV. There should be no functional changes. v2: Rebase. v3: Explain why this is needed in the commit message (Chris). Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9c59b42ee544..5ef37a74e1c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3662,10 +3662,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_enable) encoder->pre_enable(encoder); - /* VLV wants encoder enabling _before_ the pipe is up. 
*/ - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); @@ -3676,6 +3672,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, true); intel_update_fbc(dev); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); } static void i9xx_crtc_enable(struct drm_crtc *crtc) -- cgit v1.2.3 From 2960bc9cceecb5d556ce1c07656a6609e2f7e8b0 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 30 Jul 2013 13:36:32 +0300 Subject: drm/i915: make user mode sync polarity setting explicit Userspace can pass a mode with an unspecified vsync/hsync polarity setting. All encoders in the Intel driver take this to mean a negative polarity setting. The HW readout/state checker code on the other hand needs these flags to be explicitly set, otherwise the state checker will WARN about the mismatch. Get rid of the WARN by making the polarity setting explicit in the adjusted mode flags based on the requested mode flags. This will keep the existing behavior otherwise. Note that we could guess from the other timing parameters whether the user wanted a VESA or other standard mode and set the polarity accordingly. This is what the NV driver does (drivers/gpu/drm/nouveau/dispnv04/crtc.c), but I think that's not very exact and would change the existing behavior of the Intel driver. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=65442 Signed-off-by: Imre Deak Tested-by: cancan,feng Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ef37a74e1c7..347e45a2b8fb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8061,6 +8061,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, (enum transcoder) to_intel_crtc(crtc)->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; + /* + * Sanitize sync polarity flags based on requested ones. If neither + * positive or negative polarity is requested, treat this as meaning + * negative polarity. + */ + if (!(pipe_config->adjusted_mode.flags & + (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; + + if (!(pipe_config->adjusted_mode.flags & + (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; + /* Compute a starting value for pipe_config->pipe_bpp taking the source * plane pixel format and any sink constraints into account. 
Returns the * source plane bpp so that dithering can be selected on mismatches -- cgit v1.2.3 From 6d5c2d8ca3c15a191a8078316e547c1f4e5ad6eb Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Thu, 1 Aug 2013 18:21:28 +0200 Subject: i915: fix ACPI _DSM warning Since commit 29a241c (ACPICA: Add argument typechecking for all predefined ACPI names), _DSM parameters are validated which trigger the following warning: ACPI Warning: \_SB_.PCI0.GFX0._DSM: Argument #4 type mismatch - Found [Integer], ACPI requires [Package] (20130517/nsarguments-95) ACPI Warning: \_SB_.PCI0.GFX0._DSM: Argument #4 type mismatch - Found [Integer], ACPI requires [Package] (20130517/nsarguments-95) ACPI Warning: \_SB_.PCI0.P0P2.PEGP._DSM: Argument #4 type mismatch - Found [Integer], ACPI requires [Package] (20130517/nsarguments-95) ACPI Warning: \_SB_.PCI0.P0P2.PEGP._DSM: Argument #4 type mismatch - Found [Integer], ACPI requires [Package] (20130517/nsarguments-95) As the Intel _DSM method seems to ignore this parameter, let's comply to the ACPI spec and use a Package instead. Signed-off-by: Peter Wu Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=32602 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_acpi.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index bcbbaea2a78e..57fe1ae32a0d 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = { 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c }; -static int intel_dsm(acpi_handle handle, int func, int arg) +static int intel_dsm(acpi_handle handle, int func) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; @@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg) params[1].integer.value = INTEL_DSM_REVISION_ID; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; - params[3].type = ACPI_TYPE_INTEGER; - params[3].integer.value = arg; + params[3].type = ACPI_TYPE_PACKAGE; + params[3].package.count = 0; + params[3].package.elements = NULL; ret = acpi_evaluate_object(handle, "_DSM", &input, &output); if (ret) { @@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void) params[1].integer.value = INTEL_DSM_REVISION_ID; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; - params[3].type = ACPI_TYPE_INTEGER; - params[3].integer.value = 0; + params[3].type = ACPI_TYPE_PACKAGE; + params[3].package.count = 0; + params[3].package.elements = NULL; ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, &output); @@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) return false; } - ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS); if (ret < 0) { DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); return false; -- cgit v1.2.3 From 87a6b688ccc78b2c54bee56879c6d195d2457ebe Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sun, 4 Aug 2013 23:47:29 -0700 Subject: drm/i915/hsw: Change default LLC age to 3 The default LLC age was changed: commit 0d8ff15e9a15f2b393e53337a107b7a1e5919b6d Author: Ben Widawsky Date: Thu Jul 4 11:02:03 2013 -0700 drm/i915/hsw: Set correct Haswell PTE encodings. On the surface it would seem setting a default age wouldn't matter because all GEM BOs are aged similarly, so the order in which objects are evicted would not be subject to aging. 
The current working theory as to why this caused a regression though is that LLC is a bit special in that it is shared with the CPU. Presumably (not verified) the CPU fetches cachelines with age 3, and therefore recently cached GPU objects would be evicted before similar CPU objects when the LLC is full. It stands to reason therefore that this would negatively impact CPU bound benchmarks - but those seem to be low on the priority list. eLLC OTOH does not have this same property as LLC. It should be used entirely for the GPU, and so the age really shouldn't matter. Furthermore, we have no evidence to suggest one is better than another on eLLC. Since we've never properly supported eLLC before now, there should be no regression. If the GPU client really wants "younger" objects, they should use MOCS. v2: Drop the extra #define (Chad) v3: Actually git add v4: Pimped commit message Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67062 Signed-off-by: Ben Widawsky Reviewed-by: Chad Versace Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e7b420495516..3e7f1242af91 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -52,6 +52,7 @@ */ #define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ (((bits) & 0x8) << (11 - 3))) +#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) @@ -105,7 +106,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) - pte |= HSW_WB_LLC_AGE0; + pte |= HSW_WB_LLC_AGE3; return pte; } -- cgit v1.2.3 From fc8c067eee712b274e554be5cc87c79366cc5ad2 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:54 -0700 Subject: drm/i915: Create an init vm Move all the similar address space (VM) initialization code to one function. Until we have multiple VMs, there should only ever be 1 VM. The aliasing ppgtt is a special case without its own VM (since it doesn't need its own address space management).
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 4 ---- drivers/gpu/drm/i915/i915_gem.c | 15 +++++++++++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f48f1c476977..ce098c3ccc00 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1485,10 +1485,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) i915_dump_device_info(dev_priv); - INIT_LIST_HEAD(&dev_priv->vm_list); - INIT_LIST_HEAD(&dev_priv->gtt.base.global_link); - list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list); - if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 35d17fb1b89b..4e8a6d4815fa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4340,6 +4340,16 @@ init_ring_lists(struct intel_ring_buffer *ring) INIT_LIST_HEAD(&ring->request_list); } +static void i915_init_vm(struct drm_i915_private *dev_priv, + struct i915_address_space *vm) +{ + vm->dev = dev_priv->dev; + INIT_LIST_HEAD(&vm->active_list); + INIT_LIST_HEAD(&vm->inactive_list); + INIT_LIST_HEAD(&vm->global_link); + list_add(&vm->global_link, &dev_priv->vm_list); +} + void i915_gem_load(struct drm_device *dev) { @@ -4352,8 +4362,9 @@ i915_gem_load(struct drm_device *dev) SLAB_HWCACHE_ALIGN, NULL); - INIT_LIST_HEAD(&dev_priv->gtt.base.active_list); - INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list); + INIT_LIST_HEAD(&dev_priv->vm_list); + i915_init_vm(dev_priv, &dev_priv->gtt.base); + INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); -- cgit v1.2.3 From 31a46c9c092afc6558e7be7eaa42eb9bd4d3de8b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:55 -0700 Subject: drm/i915: Rework drop caches for checkpatch With an upcoming change to bind, to make checkpatch happy and keep the code clean, we need to rework this code a bit. This should have no functional impact. Signed-off-by: Ben Widawsky [danvet: Add the newline Chris requested.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index eed2f4ca9a76..04debcedac2d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1784,12 +1784,14 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_BOUND) { list_for_each_entry_safe(obj, next, &vm->inactive_list, - mm_list) - if (obj->pin_count == 0) { - ret = i915_gem_object_unbind(obj); - if (ret) - goto unlock; - } + mm_list) { + if (obj->pin_count) + continue; + + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; + } } if (val & DROP_UNBOUND) { -- cgit v1.2.3 From a70a3148b0c61cb7c588ea650db785b261b378a3 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:56 -0700 Subject: drm/i915: Make proper functions for VMs Earlier in the conversion sequence we attempted to quickly wedge in the transitional interface as static inlines. Now that we're sure these interfaces are sane, for easier debug and to decrease code size (since many of these functions may be called quite a bit), make them real functions. While at it, kill off the set_color interface. We'll always have the VMA, or easily get to it.
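The helpers added below all share one pattern: an object now carries a list of VMAs, one per address space it is bound into, and each per-VM query walks that list for the entry whose vm pointer matches. A self-contained sketch of that lookup with deliberately simplified stand-in types (not the driver's own):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct address_space { const char *name; };

struct vma {
    struct address_space *vm;      /* which address space this binding is in */
    unsigned long start, size;
    struct vma *next;
};

struct gem_object { struct vma *vma_list; };

/* Find the binding of obj in the given address space, if any. */
static struct vma *obj_to_vma(struct gem_object *obj, struct address_space *vm)
{
    for (struct vma *v = obj->vma_list; v; v = v->next)
        if (v->vm == vm)
            return v;
    return NULL;
}

static bool obj_bound(struct gem_object *obj, struct address_space *vm)
{
    return obj_to_vma(obj, vm) != NULL;
}

int main(void)
{
    struct address_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };
    struct vma v_ggtt = { &ggtt, 0x10000, 0x1000, NULL };
    struct gem_object obj = { &v_ggtt };

    printf("bound in ggtt: %d, in ppgtt: %d\n",
           obj_bound(&obj, &ggtt), obj_bound(&obj, &ppgtt));
    return 0;
}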
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 83 ++++++++++++++++------------------- drivers/gpu/drm/i915/i915_gem.c | 78 ++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_gem_evict.c | 8 ++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- 4 files changed, 118 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 67a15d00d5f2..79d4fed9d066 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1393,52 +1393,6 @@ struct drm_i915_gem_object { #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -/* This is a temporary define to help transition us to real VMAs. If you see - * this, you're either reviewing code, or bisecting it. */ -static inline struct i915_vma * -__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj) -{ - if (list_empty(&obj->vma_list)) - return NULL; - return list_first_entry(&obj->vma_list, struct i915_vma, vma_link); -} - -/* Whether or not this object is currently mapped by the translation tables */ -static inline bool -i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) -{ - struct i915_vma *vma = __i915_gem_obj_to_vma(o); - if (vma == NULL) - return false; - return drm_mm_node_allocated(&vma->node); -} - -/* Offset of the first PTE pointing to this object */ -static inline unsigned long -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) -{ - BUG_ON(list_empty(&o->vma_list)); - return __i915_gem_obj_to_vma(o)->node.start; -} - -/* The size used in the translation tables may be larger than the actual size of - * the object on GEN2/GEN3 because of the way tiling is handled. See - * i915_gem_get_gtt_size() for more details. - */ -static inline unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) -{ - BUG_ON(list_empty(&o->vma_list)); - return __i915_gem_obj_to_vma(o)->node.size; -} - -static inline void -i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, - enum i915_cache_level color) -{ - __i915_gem_obj_to_vma(o)->node.color = color; -} - /** * Request queue structure. 
* @@ -1906,6 +1860,43 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, void i915_gem_restore_fences(struct drm_device *dev); +unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +/* Some GGTT VM helpers */ +#define obj_to_ggtt(obj) \ + (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) +static inline bool i915_is_ggtt(struct i915_address_space *vm) +{ + struct i915_address_space *ggtt = + &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; + return vm == ggtt; +} + +static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); +} + +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); +} + +static inline unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_size(obj, obj_to_ggtt(obj)); +} +#undef obj_to_ggtt + /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4e8a6d4815fa..9e2d0f126be1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2631,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) /* Avoid an unnecessary call to unbind on rebind. 
*/ obj->map_and_fenceable = true; - vma = __i915_gem_obj_to_vma(obj); + vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); list_del(&vma->vma_link); drm_mm_remove_node(&vma->node); i915_gem_vma_destroy(vma); @@ -3319,7 +3319,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_vma *vma = __i915_gem_obj_to_vma(obj); + struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); int ret; if (obj->cache_level == cache_level) @@ -3359,7 +3359,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - i915_gem_obj_ggtt_set_color(obj, cache_level); + i915_gem_obj_to_vma(obj, &dev_priv->gtt.base)->node.color = cache_level; } if (cache_level == I915_CACHE_NONE) { @@ -4672,3 +4672,75 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) mutex_unlock(&dev->struct_mutex); return cnt; } + +/* All the new VM stuff */ +unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; + + if (vm == &dev_priv->mm.aliasing_ppgtt->base) + vm = &dev_priv->gtt.base; + + BUG_ON(list_empty(&o->vma_list)); + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (vma->vm == vm) + return vma->node.start; + + } + return -1; +} + +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == vm) + return true; + + return false; +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_address_space *vm; + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + if (i915_gem_obj_bound(o, vm)) + return true; + + return false; +} + +unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; + + if (vm == &dev_priv->mm.aliasing_ppgtt->base) + vm = &dev_priv->gtt.base; + + BUG_ON(list_empty(&o->vma_list)); + + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == vm) + return vma->node.size; + + return 0; +} + +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == vm) + return vma; + + return NULL; +} diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index df61f338dea1..33d85a4447a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -34,7 +34,9 @@ static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { - struct i915_vma *vma = __i915_gem_obj_to_vma(obj); + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); if (obj->pin_count) return false; @@ -109,7 +111,7 @@ none: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - vma = __i915_gem_obj_to_vma(obj); + vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); @@ -130,7 +132,7 @@ found: obj = 
list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - vma = __i915_gem_obj_to_vma(obj); + vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); if (drm_mm_scan_remove_block(&vma->node)) { list_move(&obj->exec_list, &eviction_list); drm_gem_object_reference(&obj->base); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3e7f1242af91..90a276e35909 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -657,7 +657,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - struct i915_vma *vma = __i915_gem_obj_to_vma(obj); + struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); int ret; DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", i915_gem_obj_ggtt_offset(obj), obj->base.size); -- cgit v1.2.3 From fcb4a57805e04dee04f736c25a5648ec7bebe30f Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:57 -0700 Subject: drm/i915: Use bound list for inactive shrink Do to the move active/inactive lists, it no longer makes sense to use them for shrinking, since shrinking isn't VM specific (such a need may also exist, but doesn't yet). What we can do instead is use the global bound list to find all objects which aren't active. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9e2d0f126be1..c9de97ac1d08 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4635,7 +4635,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; - struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; int nr_to_scan = sc->nr_to_scan; bool unlock = true; @@ -4664,9 +4663,14 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &vm->inactive_list, mm_list) + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (obj->active) + continue; + if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; + } if (unlock) mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From c37e22046148971a35a89931aa1f951bb99d5514 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:58 -0700 Subject: drm/i915: Add VM to pin To verbalize it, one can say, "pin an object into the given address space." The semantics of pinning remain the same otherwise. Certain objects will always have to be bound into the global GTT. Therefore, global GTT is a special case, and keep a special interface around for it (i915_gem_obj_ggtt_pin). 
v2: s/i915_gem_ggtt_pin/i915_gem_obj_ggtt_pin Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 11 +++++++++++ drivers/gpu/drm/i915/i915_gem.c | 9 +++++---- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +++- drivers/gpu/drm/i915/intel_overlay.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++---- 7 files changed, 27 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 79d4fed9d066..a8b51d525f8c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1710,6 +1710,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, void i915_gem_vma_destroy(struct i915_vma *vma); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, uint32_t alignment, bool map_and_fenceable, bool nonblocking); @@ -1895,6 +1896,16 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) { return i915_gem_obj_size(obj, obj_to_ggtt(obj)); } + +static inline int __must_check +i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable, + bool nonblocking) +{ + return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, + map_and_fenceable, nonblocking); +} #undef obj_to_ggtt /* i915_gem_context.c */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c9de97ac1d08..8322dbe3ff16 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -592,7 +592,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, char __user *user_data; int page_offset, page_length, ret; - ret = i915_gem_object_pin(obj, 0, true, true); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); if (ret) goto out; @@ -1346,7 +1346,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* Now bind it into the GTT if needed */ - ret = i915_gem_object_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); if (ret) goto unlock; @@ -3488,7 +3488,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_object_pin(obj, alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); if (ret) return ret; @@ -3631,6 +3631,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) int i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, uint32_t alignment, bool map_and_fenceable, bool nonblocking) @@ -3720,7 +3721,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } if (obj->user_pin_count == 0) { - ret = i915_gem_object_pin(obj, args->alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); if (ret) goto out; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 2470206a4d07..d1cb28cbc71e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv) * default context. 
*/ dev_priv->ring[RCS].default_context = ctx; - ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); + ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); goto err_destroy; @@ -400,7 +400,7 @@ static int do_switch(struct i915_hw_context *to) if (from == to) return 0; - ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); + ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5b6d764e9bb2..7addab31783f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -409,7 +409,9 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); + /* FIXME: vm plubming */ + ret = i915_gem_object_pin(obj, &dev_priv->gtt.base, entry->alignment, + need_mappable, false); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 9ec5a4e12af2..ddfd0aefe0c0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1352,7 +1352,7 @@ void intel_setup_overlay(struct drm_device *dev) } overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); + ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index da1b64121611..4c4020631b36 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2886,7 +2886,7 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } - ret = i915_gem_object_pin(ctx, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8527ea05124b..74d02a704515 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -501,7 +501,7 @@ init_pipe_control(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); if (ret) goto err_unref; @@ -1224,7 +1224,7 @@ static int init_status_page(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); if (ret != 0) { goto err_unref; } @@ -1307,7 +1307,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); if (ret) goto err_unref; @@ -1828,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return -ENOMEM; } - ret = i915_gem_object_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); if (ret != 0) { drm_gem_object_unreference(&obj->base); DRM_ERROR("Failed to ping batch bo\n"); -- cgit v1.2.3 From 40d74980d3ada5ad76e333dfcc87645f3f7e9820 Mon Sep 17 00:00:00 
2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 16:59:59 -0700 Subject: drm/i915: Use ggtt_vm to save some typing Just some small cleanups, and a rename of vm->ggtt_vm requested by Daniel. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 19 ++++++++----------- drivers/gpu/drm/i915/i915_gem_stolen.c | 10 +++++----- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 90a276e35909..f38cc696be7f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -643,7 +643,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, * aperture. One page should be enough to keep any prefetching inside * of the aperture. */ - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; struct drm_mm_node *entry; struct drm_i915_gem_object *obj; unsigned long hole_start, hole_end; @@ -651,19 +652,19 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, BUG_ON(mappable_end > end); /* Subtract the guard page ... */ - drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE); + drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); if (!HAS_LLC(dev)) dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); + struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); int ret; DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", i915_gem_obj_ggtt_offset(obj), obj->base.size); WARN_ON(i915_gem_obj_ggtt_bound(obj)); - ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); + ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); if (ret) DRM_DEBUG_KMS("Reservation failed\n"); obj->has_global_gtt_mapping = 1; @@ -674,19 +675,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, dev_priv->gtt.base.total = end - start; /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm, - hole_start, hole_end) { + drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - hole_start / PAGE_SIZE, - count); + ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); } /* And finally clear the reserved guard page */ - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - end / PAGE_SIZE - 1, 1); + ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); } static bool diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 4bbde2ae1819..934840860c6d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -333,7 +333,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 size) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_address_space *ggtt = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; struct i915_vma *vma; @@ -376,7 +376,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; - vma = 
i915_gem_vma_create(obj, &dev_priv->gtt.base); + vma = i915_gem_vma_create(obj, ggtt); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_out; @@ -389,8 +389,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, */ vma->node.start = gtt_offset; vma->node.size = size; - if (drm_mm_initialized(&dev_priv->gtt.base.mm)) { - ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); + if (drm_mm_initialized(&ggtt->mm)) { + ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); i915_gem_vma_destroy(vma); @@ -401,7 +401,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, obj->has_global_gtt_mapping = 1; list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &vm->inactive_list); + list_add_tail(&obj->mm_list, &ggtt->inactive_list); return obj; -- cgit v1.2.3 From 1d693bcc37461a66fafd13ff171c4496aee0df98 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:00 -0700 Subject: drm/i915: Update describe_obj Make it aware of which domain it is bound into GGTT, or PPGTT. While modifying the function, add a global gtt flag to the object description. Global is more interesting than aliasing since aliasing is the default. v2: Access VMA directly for start/size instead of helpers (Daniel) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 04debcedac2d..748af58b0cea 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -89,13 +89,20 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj) } } +static inline const char *get_global_flag(struct drm_i915_gem_object *obj) +{ + return obj->has_global_gtt_mapping ? "g" : " "; +} + static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + struct i915_vma *vma; + seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), + get_global_flag(obj), obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, @@ -111,9 +118,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", obj->pin_count); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); - if (i915_gem_obj_ggtt_bound(obj)) - seq_printf(m, " (gtt offset: %08lx, size: %08x)", - i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj)); + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (!i915_is_ggtt(vma->vm)) + seq_puts(m, " (pp"); + else + seq_puts(m, " (g"); + seq_printf(m, "gtt offset: %08lx, size: %08lx)", + vma->node.start, vma->node.size); + } if (obj->stolen) seq_printf(m, " (stolen: %08lx)", obj->stolen->start); if (obj->pin_mappable || obj->fault_mappable) { -- cgit v1.2.3 From 28d6a7bfa2560cb94727a68511ed68561e84dcc8 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:02 -0700 Subject: drm/i915: thread address space through execbuf This represents the first half of hooking up VMs to execbuf. Here we basically pass an address space all around to the different internal functions. 
It should be much more readable, and have less risk than the second half, which begins switching over to using VMAs instead of an obj,vm. The overall series echoes this style of, "add a VM, then make it smart later" Signed-off-by: Ben Widawsky [danvet: Switch a BUG_ON to WARN_ON.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 77 +++++++++++++++++++----------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7addab31783f..9939d2ef3ea9 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -174,7 +174,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *reloc) + struct drm_i915_gem_relocation_entry *reloc, + struct i915_address_space *vm) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; @@ -297,7 +298,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct eb_objects *eb) + struct eb_objects *eb, + struct i915_address_space *vm) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; @@ -321,7 +323,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, + vm); if (ret) return ret; @@ -344,13 +347,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *relocs) + struct drm_i915_gem_relocation_entry *relocs, + struct i915_address_space *vm) { const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], + vm); if (ret) return ret; } @@ -359,7 +364,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, } static int -i915_gem_execbuffer_relocate(struct eb_objects *eb) +i915_gem_execbuffer_relocate(struct eb_objects *eb, + struct i915_address_space *vm) { struct drm_i915_gem_object *obj; int ret = 0; @@ -373,7 +379,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb) */ pagefault_disable(); list_for_each_entry(obj, &eb->objects, exec_list) { - ret = i915_gem_execbuffer_relocate_object(obj, eb); + ret = i915_gem_execbuffer_relocate_object(obj, eb, vm); if (ret) break; } @@ -395,6 +401,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, + struct i915_address_space *vm, bool *need_reloc) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; @@ -409,9 +416,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - /* FIXME: vm plubming */ - ret = i915_gem_object_pin(obj, &dev_priv->gtt.base, entry->alignment, - need_mappable, false); + ret = i915_gem_object_pin(obj, 
vm, entry->alignment, need_mappable, + false); if (ret) return ret; @@ -438,8 +444,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->has_aliasing_ppgtt_mapping = 1; } - if (entry->offset != i915_gem_obj_ggtt_offset(obj)) { - entry->offset = i915_gem_obj_ggtt_offset(obj); + if (entry->offset != i915_gem_obj_offset(obj, vm)) { + entry->offset = i915_gem_obj_offset(obj, vm); *need_reloc = true; } @@ -477,6 +483,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct list_head *objects, + struct i915_address_space *vm, bool *need_relocs) { struct drm_i915_gem_object *obj; @@ -531,32 +538,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, list_for_each_entry(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool need_fence, need_mappable; + u32 obj_offset; - if (!i915_gem_obj_ggtt_bound(obj)) + if (!i915_gem_obj_bound(obj, vm)) continue; + obj_offset = i915_gem_obj_offset(obj, vm); need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); + WARN_ON((need_mappable || need_fence) && + !i915_is_ggtt(vm)); + if ((entry->alignment && - i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) || + obj_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); if (ret) goto err; } /* Bind fresh objects */ list_for_each_entry(obj, objects, exec_list) { - if (i915_gem_obj_ggtt_bound(obj)) + if (i915_gem_obj_bound(obj, vm)) continue; - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); if (ret) goto err; } @@ -580,7 +592,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec) + struct drm_i915_gem_exec_object2 *exec, + struct i915_address_space *vm) { struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_object *obj; @@ -664,14 +677,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); if (ret) goto err; list_for_each_entry(obj, &eb->objects, exec_list) { int offset = obj->exec_entry - exec; ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, - reloc + reloc_offset[offset]); + reloc + reloc_offset[offset], + vm); if (ret) goto err; } @@ -772,6 +786,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, static void i915_gem_execbuffer_move_to_active(struct list_head *objects, + struct i915_address_space *vm, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; @@ -840,7 +855,8 @@ static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec) + struct drm_i915_gem_exec_object2 *exec, + struct i915_address_space *vm) { drm_i915_private_t *dev_priv = dev->dev_private; struct eb_objects *eb; @@ -1002,17 +1018,17 @@ i915_gem_do_execbuffer(struct 
drm_device *dev, void *data, /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); if (ret) goto err; /* The objects are in their final locations, apply the relocations. */ if (need_relocs) - ret = i915_gem_execbuffer_relocate(eb); + ret = i915_gem_execbuffer_relocate(eb, vm); if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, - eb, exec); + eb, exec, vm); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } if (ret) @@ -1063,7 +1079,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset; + exec_start = i915_gem_obj_offset(batch_obj, vm) + + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { @@ -1088,7 +1105,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); - i915_gem_execbuffer_move_to_active(&eb->objects, ring); + i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); err: @@ -1109,6 +1126,7 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_exec_object *exec_list = NULL; @@ -1164,7 +1182,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.flags = I915_EXEC_RENDER; i915_execbuffer2_set_context_id(exec2, 0); - ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, + &dev_priv->gtt.base); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) @@ -1190,6 +1209,7 @@ int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; @@ -1220,7 +1240,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EFAULT; } - ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list, + &dev_priv->gtt.base); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ ret = copy_to_user(to_user_ptr(args->buffers_ptr), -- cgit v1.2.3 From 3089c6f239d7d2c4cb2dd5c353e8984cf79af1d7 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:03 -0700 Subject: drm/i915: make caching operate on all address spaces For now, objects will maintain the same cache levels amongst all address spaces. This is to limit the risk of bugs, as playing with cacheability in the different domains can be very error prone. In the future, it may be optimal to allow setting domains per VMA (ie. an object bound into an address space). 
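As a purely illustrative sketch (not part of this patch; obj, vma and ret stand for locals in a caller that already holds struct_mutex), the cache level remains a per-object property, so a single call covers every VMA the object is bound through:

	/* Sketch only: one per-object cache level, mirrored into each VMA. */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret == 0)
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			WARN_ON(vma->node.color != I915_CACHE_LLC);
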
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8322dbe3ff16..56c300f8f9b1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3319,7 +3319,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); + struct i915_vma *vma; int ret; if (obj->cache_level == cache_level) @@ -3330,13 +3330,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + + break; + } } - if (i915_gem_obj_ggtt_bound(obj)) { + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -3358,8 +3362,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - - i915_gem_obj_to_vma(obj, &dev_priv->gtt.base)->node.color = cache_level; } if (cache_level == I915_CACHE_NONE) { @@ -3385,6 +3387,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, old_write_domain); } + list_for_each_entry(vma, &obj->vma_list, vma_link) + vma->node.color = cache_level; obj->cache_level = cache_level; i915_gem_verify_gtt(dev); return 0; -- cgit v1.2.3 From 3e12302705a961cfe86d52155b4a8cbb34214748 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:04 -0700 Subject: drm/i915: BUG_ON put_pages later With multiple VMs, the eviction code benefits from being able to blindly put pages without needing to know if there are any entities still holding on to those pages. As such it's preferable to return the -EBUSY before the BUG. Eviction code is the only user for now, but overall it makes sense anyway, IMO. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 56c300f8f9b1..f02d9234bd91 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1668,11 +1668,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages == NULL) return 0; - BUG_ON(i915_gem_obj_ggtt_bound(obj)); - if (obj->pages_pin_count) return -EBUSY; + BUG_ON(i915_gem_obj_ggtt_bound(obj)); + /* ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt * lists early. */ -- cgit v1.2.3 From d1ccbb5d711ba4994eb36c4aac84e0269b5365fe Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:05 -0700 Subject: drm/i915: make reset&hangcheck code VM aware Hangcheck, and some of the recent reset code for guilty batches need to know which address space the object was in at the time of a hangcheck. 
This is because we use offsets in the (PP|G)GTT to determine this information, and those offsets can differ depending on which VM they are bound into. Since we still only have 1 VM ever, this code shouldn't yet have any impact. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f02d9234bd91..b7386df82030 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2119,10 +2119,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) spin_unlock(&file_priv->mm.lock); } -static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) +static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, + struct i915_address_space *vm) { - if (acthd >= i915_gem_obj_ggtt_offset(obj) && - acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) + if (acthd >= i915_gem_obj_offset(obj, vm) && + acthd < i915_gem_obj_offset(obj, vm) + obj->base.size) return true; return false; @@ -2145,6 +2146,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked, return false; } +static struct i915_address_space * +request_to_vm(struct drm_i915_gem_request *request) +{ + struct drm_i915_private *dev_priv = request->ring->dev->dev_private; + struct i915_address_space *vm; + + vm = &dev_priv->gtt.base; + + return vm; +} + static bool i915_request_guilty(struct drm_i915_gem_request *request, const u32 acthd, bool *inside) { @@ -2152,9 +2164,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request, * pointing inside the ring, matches the batch_obj address range. * However this is extremely unlikely. */ - if (request->batch_obj) { - if (i915_head_inside_object(acthd, request->batch_obj)) { + if (i915_head_inside_object(acthd, request->batch_obj, + request_to_vm(request))) { *inside = true; return true; } @@ -2174,17 +2186,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, { struct i915_ctx_hang_stats *hs = NULL; bool inside, guilty; + unsigned long offset = 0; /* Innocent until proven guilty */ guilty = false; + if (request->batch_obj) + offset = i915_gem_obj_offset(request->batch_obj, + request_to_vm(request)); + if (ring->hangcheck.action != wait && i915_request_guilty(request, acthd, &inside)) { DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", ring->name, inside ? "inside" : "flushing", - request->batch_obj ? - i915_gem_obj_ggtt_offset(request->batch_obj) : 0, + offset, request->ctx ? request->ctx->id : 0, acthd); -- cgit v1.2.3 From 3a88d0ac809a7fff315b2404559d90d8e74c716c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:49 +0300 Subject: drm/i915: Add ILK support to intel_read_wm_latency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ILK has a slightly different way to read out the watermark latency values. On ILK the LP0 latency values are in fact not stored in any register, and instead we must use fixed values. 
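Purely as a worked example of the units involved (not code from the patch; wm[] here stands for the latency array filled in below): WM0 latencies are kept in 0.1 us steps and WM1+ in 0.5 us steps, so the fixed 700 ns ILK LP0 plane latency is stored as 7, while the MLTR_ILK fields are used as read:

	/* Sketch of the unit bookkeeping only. */
	unsigned int wm0_ns = wm[0] * 100;	/* 7 * 100 = 700 ns LP0 plane latency */
	unsigned int wm1_ns = wm[1] * 500;	/* MLTR_ILK WM1 field, 0.5 us per step */
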
Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4c4020631b36..e5e0fb2a3e93 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2372,6 +2372,13 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; + } else if (INTEL_INFO(dev)->gen >= 5) { + uint32_t mltr = I915_READ(MLTR_ILK); + + /* ILK primary LP0 latency is 700 ns */ + wm[0] = 7; + wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; + wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; } } -- cgit v1.2.3 From 53615a5e129534fa161e882fc3c1c4f269166b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:50 +0300 Subject: drm/i915: Store the watermark latency values in dev_priv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than having to read the latency values out every time, just store them in dev_priv. On ILK and IVB there is a difference between some of the latency values for different planes, so store the latency values for each plane type separately, and apply the necessary fixups during init. v2: Fix some checkpatch complaints Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 14 ++++++++++ drivers/gpu/drm/i915/intel_pm.c | 62 +++++++++++++++++++++++++++++++++++------ 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a8b51d525f8c..da1827af6241 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1216,6 +1216,20 @@ typedef struct drm_i915_private { struct i915_suspend_saved_registers regfile; + struct { + /* + * Raw watermark latency values: + * in 0.1us units for WM0, + * in 0.5us units for WM1+. + */ + /* primary */ + uint16_t pri_latency[5]; + /* sprite */ + uint16_t spr_latency[5]; + /* cursor */ + uint16_t cur_latency[5]; + } wm; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! 
*/ struct i915_dri1_state dri1; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e5e0fb2a3e93..87db1f064fc4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2382,6 +2382,39 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) } } +static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) +{ + /* ILK sprite LP0 latency is 1300 ns */ + if (INTEL_INFO(dev)->gen == 5) + wm[0] = 13; +} + +static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) +{ + /* ILK cursor LP0 latency is 1300 ns */ + if (INTEL_INFO(dev)->gen == 5) + wm[0] = 13; + + /* WaDoubleCursorLP3Latency:ivb */ + if (IS_IVYBRIDGE(dev)) + wm[3] *= 2; +} + +static void intel_setup_wm_latency(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_read_wm_latency(dev, dev_priv->wm.pri_latency); + + memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, + sizeof(dev_priv->wm.pri_latency)); + memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, + sizeof(dev_priv->wm.pri_latency)); + + intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); + intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); +} + static void hsw_compute_wm_parameters(struct drm_device *dev, struct hsw_pipe_wm_parameters *params, struct hsw_wm_maximums *lp_max_1_2, @@ -2627,16 +2660,17 @@ static void haswell_update_wm(struct drm_device *dev) struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; struct hsw_pipe_wm_parameters params[3]; struct hsw_wm_values results_1_2, results_5_6, *best_results; - uint16_t wm[5] = {}; enum hsw_data_buf_partitioning partitioning; - intel_read_wm_latency(dev, wm); hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); - hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2); + hsw_compute_wm_results(dev, params, + dev_priv->wm.pri_latency, + &lp_max_1_2, &results_1_2); if (lp_max_1_2.pri != lp_max_5_6.pri) { - hsw_compute_wm_results(dev, params, wm, &lp_max_5_6, - &results_5_6); + hsw_compute_wm_results(dev, params, + dev_priv->wm.pri_latency, + &lp_max_5_6, &results_5_6); best_results = hsw_find_best_result(&results_1_2, &results_5_6); } else { best_results = &results_1_2; @@ -5229,8 +5263,12 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { + intel_setup_wm_latency(dev); + if (IS_GEN5(dev)) { - if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + if (dev_priv->wm.pri_latency[1] && + dev_priv->wm.spr_latency[1] && + dev_priv->wm.cur_latency[1]) dev_priv->display.update_wm = ironlake_update_wm; else { DRM_DEBUG_KMS("Failed to get proper latency. 
" @@ -5239,7 +5277,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = ironlake_init_clock_gating; } else if (IS_GEN6(dev)) { - if (SNB_READ_WM0_LATENCY()) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = sandybridge_update_wm; dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; } else { @@ -5249,7 +5289,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = gen6_init_clock_gating; } else if (IS_IVYBRIDGE(dev)) { - if (SNB_READ_WM0_LATENCY()) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = ivybridge_update_wm; dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; } else { @@ -5259,7 +5301,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; } else if (IS_HASWELL(dev)) { - if (I915_READ64(MCH_SSKPD)) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = haswell_update_wm; dev_priv->display.update_sprite_wm = haswell_update_sprite_wm; -- cgit v1.2.3 From 5b77da33c11b72d703382a93c402544186c7721e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:51 +0300 Subject: drm/i915: Use the stored cursor and plane latencies properly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than pass around the plane latencies, just grab them from dev_priv nearer to where they're needed. Do the same for cursor latencies. v2: Add some comments about latency units Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 87db1f064fc4..936c1628075a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2270,7 +2270,8 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, params->pri_bytes_per_pixel); } -static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, +static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, + int level, struct hsw_wm_maximums *max, struct hsw_pipe_wm_parameters *params, struct hsw_lp_wm_result *result) { @@ -2279,10 +2280,14 @@ static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { struct hsw_pipe_wm_parameters *p = ¶ms[pipe]; - - pri_val[pipe] = ilk_compute_pri_wm(p, mem_value, true); - spr_val[pipe] = ilk_compute_spr_wm(p, mem_value); - cur_val[pipe] = ilk_compute_cur_wm(p, mem_value); + /* WM1+ latency values stored in 0.5us units */ + uint16_t pri_latency = dev_priv->wm.pri_latency[level] * 5; + uint16_t spr_latency = dev_priv->wm.spr_latency[level] * 5; + uint16_t cur_latency = dev_priv->wm.cur_latency[level] * 5; + + pri_val[pipe] = ilk_compute_pri_wm(p, pri_latency, true); + spr_val[pipe] = ilk_compute_spr_wm(p, spr_latency); + cur_val[pipe] = ilk_compute_cur_wm(p, cur_latency); fbc_val[pipe] = ilk_compute_fbc_wm(p, pri_val[pipe]); } @@ -2305,14 +2310,18 @@ static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, } static uint32_t hsw_compute_wm_pipe(struct drm_i915_private 
*dev_priv, - uint32_t mem_value, enum pipe pipe, + enum pipe pipe, struct hsw_pipe_wm_parameters *params) { uint32_t pri_val, cur_val, spr_val; + /* WM0 latency values stored in 0.1us units */ + uint16_t pri_latency = dev_priv->wm.pri_latency[0]; + uint16_t spr_latency = dev_priv->wm.spr_latency[0]; + uint16_t cur_latency = dev_priv->wm.cur_latency[0]; - pri_val = ilk_compute_pri_wm(params, mem_value, false); - spr_val = ilk_compute_spr_wm(params, mem_value); - cur_val = ilk_compute_cur_wm(params, mem_value); + pri_val = ilk_compute_pri_wm(params, pri_latency, false); + spr_val = ilk_compute_spr_wm(params, spr_latency); + cur_val = ilk_compute_cur_wm(params, cur_latency); WARN(pri_val > 127, "Primary WM error, mode not supported for pipe %c\n", @@ -2478,7 +2487,6 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, static void hsw_compute_wm_results(struct drm_device *dev, struct hsw_pipe_wm_parameters *params, - uint16_t *wm, struct hsw_wm_maximums *lp_maximums, struct hsw_wm_values *results) { @@ -2489,7 +2497,8 @@ static void hsw_compute_wm_results(struct drm_device *dev, int level, max_level, wm_lp; for (level = 1; level <= 4; level++) - if (!hsw_compute_lp_wm(wm[level] * 5, lp_maximums, params, + if (!hsw_compute_lp_wm(dev_priv, level, + lp_maximums, params, &lp_results[level - 1])) break; max_level = level - 1; @@ -2521,8 +2530,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, } for_each_pipe(pipe) - results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0], - pipe, + results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe, ¶ms[pipe]); for_each_pipe(pipe) { @@ -2665,11 +2673,9 @@ static void haswell_update_wm(struct drm_device *dev) hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); hsw_compute_wm_results(dev, params, - dev_priv->wm.pri_latency, &lp_max_1_2, &results_1_2); if (lp_max_1_2.pri != lp_max_5_6.pri) { hsw_compute_wm_results(dev, params, - dev_priv->wm.pri_latency, &lp_max_5_6, &results_5_6); best_results = hsw_find_best_result(&results_1_2, &results_5_6); } else { -- cgit v1.2.3 From 26ec971e302c53b44cc5627ffe209a7d33199e28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:52 +0300 Subject: drm/i915: Print the watermark latencies during init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Seeing the watermark latency values in dmesg might help sometimes. v2: Use DRM_ERROR() when expected latency values are missing Note: We might hit the DRM_ERROR added in this patch and apparently there's not much we can do about that. But I think it'd be interesting to figure out whether that actually happens in the real world, so I didn't apply a s/DRM_ERROR/DRM_DEBUG_KMS/ bikeshed while applying. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni [danvet: Add note about new error dmesg output.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 936c1628075a..8358d73ae468 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2409,6 +2409,39 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) wm[3] *= 2; } +static void intel_print_wm_latency(struct drm_device *dev, + const char *name, + const uint16_t wm[5]) +{ + int level, max_level; + + /* how many WM levels are we expecting */ + if (IS_HASWELL(dev)) + max_level = 4; + else if (INTEL_INFO(dev)->gen >= 6) + max_level = 3; + else + max_level = 2; + + for (level = 0; level <= max_level; level++) { + unsigned int latency = wm[level]; + + if (latency == 0) { + DRM_ERROR("%s WM%d latency not provided\n", + name, level); + continue; + } + + /* WM1+ latency values in 0.5us units */ + if (level > 0) + latency *= 5; + + DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", + name, level, wm[level], + latency / 10, latency % 10); + } +} + static void intel_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2422,6 +2455,10 @@ static void intel_setup_wm_latency(struct drm_device *dev) intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); + + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); } static void hsw_compute_wm_parameters(struct drm_device *dev, -- cgit v1.2.3 From 3312ba65caa23cf1210cc578755babc394769843 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:53 +0300 Subject: drm/i915: Disable specific watermark levels when latency is zero MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Return UINT_MAX for the calculated WM level if the latency is zero. This will lead to marking the WM level as disabled. I'm not sure if latency==0 should mean that we want to disable the level. But that's the implication I got from the fact that we don't even enable the watermark code if the SSKPD register is 0. 
v2: Use WARN() to scare people Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8358d73ae468..856c094a35e0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2131,6 +2131,9 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, { uint64_t ret; + if (WARN(latency == 0, "Latency value missing\n")) + return UINT_MAX; + ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; @@ -2143,6 +2146,9 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, { uint32_t ret; + if (WARN(latency == 0, "Latency value missing\n")) + return UINT_MAX; + ret = (latency * pixel_rate) / (pipe_htotal * 10000); ret = (ret + 1) * horiz_pixels * bytes_per_pixel; ret = DIV_ROUND_UP(ret, 64) + 2; -- cgit v1.2.3 From b0aea5dca064176a626dc2a83727c60ace31ee6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:54 +0300 Subject: drm/i915: Use the watermark latency values from dev_priv for ILK/SNB/IVB too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjust the current ILK/SNB/IVB watermark codepaths to use the pre-populated latency values from dev_priv instead of reading them out from the registers every time. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 9 ------- drivers/gpu/drm/i915/intel_pm.c | 57 +++++++++++++++++++---------------------- 2 files changed, 27 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3aebe5dee4df..fab94be89dfa 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3196,9 +3196,6 @@ #define MLTR_WM2_SHIFT 8 /* the unit of memory self-refresh latency time is 0.5us */ #define ILK_SRLT_MASK 0x3f -#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) -#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) -#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) /* define the fifo size on Ironlake */ #define ILK_DISPLAY_FIFO 128 @@ -3245,12 +3242,6 @@ #define SSKPD_WM2_SHIFT 16 #define SSKPD_WM3_SHIFT 24 -#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) -#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) -#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) -#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) -#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) - /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. The following code diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 856c094a35e0..ccaadc87b6bb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1680,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev) I915_WRITE(FW_BLC, fwater_lo); } -#define ILK_LP0_PLANE_LATENCY 700 -#define ILK_LP0_CURSOR_LATENCY 1300 - /* * Check the wm result. 
* @@ -1797,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev) enabled = 0; if (g4x_compute_wm0(dev, PIPE_A, &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, + dev_priv->wm.pri_latency[0] * 100, &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, + dev_priv->wm.cur_latency[0] * 100, &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEA_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); @@ -1811,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev) if (g4x_compute_wm0(dev, PIPE_B, &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, + dev_priv->wm.pri_latency[0] * 100, &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, + dev_priv->wm.cur_latency[0] * 100, &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEB_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); @@ -1837,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - ILK_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1845,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - ILK_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1860,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); @@ -1874,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev) static void sandybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ u32 val; int fbc_wm, plane_wm, cursor_wm; unsigned int enabled; @@ -1929,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1937,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1952,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM3 */ if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.pri_latency[3] * 500, 
&sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1967,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM3_LP_ILK, WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); @@ -1976,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev) static void ivybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ u32 val; int fbc_wm, plane_wm, cursor_wm; int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; @@ -2046,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -2054,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -2069,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM3, note we have to correct the cursor latency */ if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.pri_latency[3] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &ignore_cursor_wm) || !ironlake_compute_srwm(dev, 3, enabled, - 2 * SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.cur_latency[3] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) @@ -2089,7 +2086,7 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM3_LP_ILK, WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); @@ -2833,7 +2830,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, bool enable, bool scaled) { struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */ u32 val; int sprite_wm, reg; int ret; @@ -2873,7 +2870,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, pixel_size, &sandybridge_display_srwm_info, - SNB_READ_WM1_LATENCY() * 500, + dev_priv->wm.spr_latency[1] * 500, &sprite_wm); if (!ret) { DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n", @@ -2889,7 +2886,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, ret = 
sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, pixel_size, &sandybridge_display_srwm_info, - SNB_READ_WM2_LATENCY() * 500, + dev_priv->wm.spr_latency[2] * 500, &sprite_wm); if (!ret) { DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n", @@ -2901,7 +2898,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, pixel_size, &sandybridge_display_srwm_info, - SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.spr_latency[3] * 500, &sprite_wm); if (!ret) { DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n", -- cgit v1.2.3 From 37126462a4feadeb3ff08c4b308a28e4db8c83a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Aug 2013 16:18:55 +0300 Subject: drm/i915: Add comments about units of latency values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All the ILK+ WM compute functions take the latency values in 0.1us units. Add a few comments to remind people about that. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ccaadc87b6bb..c20d68298072 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2123,6 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, return pixel_rate; } +/* latency must be in 0.1us units. */ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, uint32_t latency) { @@ -2137,6 +2138,7 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, return ret; } +/* latency must be in 0.1us units. */ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, uint32_t horiz_pixels, uint8_t bytes_per_pixel, uint32_t latency) @@ -2200,7 +2202,10 @@ enum hsw_data_buf_partitioning { HSW_DATA_BUF_PART_5_6, }; -/* For both WM_PIPE and WM_LP. */ +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value, bool is_lp) @@ -2227,7 +2232,10 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, return min(method1, method2); } -/* For both WM_PIPE and WM_LP. */ +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value) { @@ -2247,7 +2255,10 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, return min(method1, method2); } -/* For both WM_PIPE and WM_LP. */ +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value) { -- cgit v1.2.3 From 637efacf8fcf112a188dd005c816e2b0f39894f0 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 5 Aug 2013 09:46:44 -0700 Subject: drm/i915: eliminate dead domain clearing on reset The code itself is no longer accurate without updating once we have multiple address spaces, since clearing the domains of every object requires scanning the inactive list for all VMs. "This code is dead. Just remove it rather than port it to vma." 
- Chris Wilson Recommended-by: Chris Wilson Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b7386df82030..3debb35b7195 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2291,20 +2291,12 @@ void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; - struct drm_i915_gem_object *obj; struct intel_ring_buffer *ring; int i; for_each_ring(ring, dev_priv, i) i915_gem_reset_ring_lists(dev_priv, ring); - /* Move everything out of the GPU domains to ensure we do any - * necessary invalidation upon reuse. - */ - list_for_each_entry(obj, &vm->inactive_list, mm_list) - obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; - i915_gem_restore_fences(dev); } -- cgit v1.2.3 From f72d19f069f8efaa535aacc719d23d469b0d9f18 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 5 Aug 2013 17:25:55 -0300 Subject: drm/i915: silence useless messages about DDI buffer translation These messages are not really useful since it's very easy to check which mode is used for each port: The values programmed are based on the port type, then assigned to the ddi_translations variable. Currently we use DP mode for ports A-D and FDI mode for port E. Also, when we add the code to enable/disable PC8+, intel_prepare_ddi_buffers will be called more often and will eat your dmesg buffers. While at it, fix the coding style of the "for" statement above. Signed-off-by: Paulo Zanoni [danvet: Pimp commit message with Paulo's more detailed explanation of how the ddi translation buffer settings are computed, to answer a question from Chris.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b361c0862373..b6281d9e4d62 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -94,15 +94,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, hsw_ddi_translations_fdi : hsw_ddi_translations_dp); - DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n", - port_name(port), - use_fdi_mode ? "FDI" : "DP"); - - WARN((use_fdi_mode && (port != PORT_E)), - "Programming port %c in FDI mode, this probably will not work.\n", - port_name(port)); - - for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { + for (i = 0, reg = DDI_BUF_TRANS(port); + i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { I915_WRITE(reg, ddi_translations[i]); reg += 4; } -- cgit v1.2.3 From ad8d270c21aa2f57f0cd578b1737f65c44c34b80 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 5 Aug 2013 17:25:56 -0300 Subject: drm/i915: remove use_fdi_mode argument from intel_prepare_ddi_buffers We set the mode based on the port, and we already pass the port as an argument. 
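A minimal sketch of the resulting call site (mirroring the hunk below rather than adding anything new): with the mode derived from the port, port E picks the FDI translation table and every port is prepared in one loop:

	/* Sketch: port E is the FDI/PCH connection, the other ports get DP values. */
	for (port = PORT_A; port <= PORT_E; port++)
		intel_prepare_ddi_buffers(dev, port);
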
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b6281d9e4d62..b8c096b4a1de 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -84,15 +84,14 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) * in either FDI or DP modes only, as HDMI connections will work with both * of those */ -static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, - bool use_fdi_mode) +static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; int i; - const u32 *ddi_translations = ((use_fdi_mode) ? + const u32 *ddi_translations = (port == PORT_E) ? hsw_ddi_translations_fdi : - hsw_ddi_translations_dp); + hsw_ddi_translations_dp; for (i = 0, reg = DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { @@ -111,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev) if (!HAS_DDI(dev)) return; - for (port = PORT_A; port < PORT_E; port++) - intel_prepare_ddi_buffers(dev, port, false); - - /* DDI E is the suggested one to work in FDI mode, so program is as such - * by default. It will have to be re-programmed in case a digital DP - * output will be detected on it - */ - intel_prepare_ddi_buffers(dev, PORT_E, true); + for (port = PORT_A; port <= PORT_E; port++) + intel_prepare_ddi_buffers(dev, port); } static const long hsw_ddi_buf_ctl_values[] = { -- cgit v1.2.3 From 350ec881d966453bdcf1d3299071e90da4e507b4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2013 13:17:02 +0100 Subject: drm/i915: Rename I915_CACHE_MLC_LLC to L3_LLC for Ivybridge MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MLC_LLC was never validated for Sandybridge and was superseded by a new level of caching for the GPU in Ivybridge. Update our names to be consistent with usage, and in the process stop setting the unwanted bit on Sandybridge. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä [danvet: s/BUG/WARN_ON(1) bikeshed.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 7 +++++-- drivers/gpu/drm/i915/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 37 ++++++++++++++++++++++++++------- drivers/gpu/drm/i915/i915_gpu_error.c | 4 ++-- 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index da1827af6241..290c12dd6e6b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -449,8 +449,11 @@ struct intel_device_info { enum i915_cache_level { I915_CACHE_NONE = 0, - I915_CACHE_LLC, - I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ + I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ + I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc + caches, eg sampler/render caches, and the + large Last-Level-Cache. LLC is coherent with + the CPU, but L3 is only visible to the GPU. 
*/ }; typedef uint32_t gen6_gtt_pte_t; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d1cb28cbc71e..7273a729a039 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 7) { ret = i915_gem_object_set_cache_level(ctx->obj, - I915_CACHE_LLC_MLC); + I915_CACHE_L3_LLC); /* Failure shouldn't ever happen this early */ if (WARN_ON(ret)) goto err_out; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f38cc696be7f..24fb989593f0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -43,7 +43,7 @@ #define GEN6_PTE_UNCACHED (1 << 1) #define HSW_PTE_UNCACHED (0) #define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) +#define GEN7_PTE_CACHE_L3_LLC (3 << 1) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) @@ -56,15 +56,36 @@ #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) -static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, - enum i915_cache_level level) +static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { - case I915_CACHE_LLC_MLC: - pte |= GEN6_PTE_CACHE_LLC_MLC; + case I915_CACHE_L3_LLC: + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + WARN_ON(1); + } + + return pte; +} + +static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level) +{ + gen6_gtt_pte_t pte = GEN6_PTE_VALID; + pte |= GEN6_PTE_ADDR_ENCODE(addr); + + switch (level) { + case I915_CACHE_L3_LLC: + pte |= GEN7_PTE_CACHE_L3_LLC; break; case I915_CACHE_LLC: pte |= GEN6_PTE_CACHE_LLC; @@ -73,7 +94,7 @@ static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, pte |= GEN6_PTE_UNCACHED; break; default: - BUG(); + WARN_ON(1); } return pte; @@ -890,8 +911,10 @@ int i915_gem_gtt_init(struct drm_device *dev) gtt->base.pte_encode = hsw_pte_encode; else if (IS_VALLEYVIEW(dev)) gtt->base.pte_encode = byt_pte_encode; + else if (INTEL_INFO(dev)->gen >= 7) + gtt->base.pte_encode = ivb_pte_encode; else - gtt->base.pte_encode = gen6_pte_encode; + gtt->base.pte_encode = snb_pte_encode; } ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d970d84da65f..8091485e7e88 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -938,8 +938,8 @@ const char *i915_cache_level_str(int type) { switch (type) { case I915_CACHE_NONE: return " uncached"; - case I915_CACHE_LLC: return " snooped (LLC)"; - case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; + case I915_CACHE_LLC: return " snooped or LLC"; + case I915_CACHE_L3_LLC: return " L3+LLC"; default: return ""; } } -- cgit v1.2.3 From ddfe15677d9c47f2491e401cd773b45e1aac74bf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2013 17:43:07 +0100 Subject: drm/i915: Export intel_framebuffer_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than open-code the teardown of a framebuffer, export the routine from intel_display.c. 
This then makes intel_fbdev symmetric in its use of the common intel_framebuffer routines to initialise and clean up the struct intel_framebuffer. (And new features need only be added in one location!) Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 10 +++++++--- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_fb.c | 15 +++++---------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 347e45a2b8fb..4127ad2890f3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9402,13 +9402,17 @@ static void intel_setup_outputs(struct drm_device *dev) drm_helper_move_panel_connectors_to_head(dev); } +void intel_framebuffer_fini(struct intel_framebuffer *fb) +{ + drm_framebuffer_cleanup(&fb->base); + drm_gem_object_unreference_unlocked(&fb->obj->base); +} + static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(&intel_fb->obj->base); - + intel_framebuffer_fini(intel_fb); kfree(intel_fb); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ed33976c194b..54e389de9f42 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -720,6 +720,7 @@ extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); +extern void intel_framebuffer_fini(struct intel_framebuffer *fb); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_initial_config(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index f3c97e05b0d8..bc2100007b21 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { - struct fb_info *info; - struct intel_framebuffer *ifb = &ifbdev->ifb; - if (ifbdev->helper.fbdev) { - info = ifbdev->helper.fbdev; + struct fb_info *info = ifbdev->helper.fbdev; + unregister_framebuffer(info); iounmap(info->screen_base); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); } drm_fb_helper_fini(&ifbdev->helper); - drm_framebuffer_unregister_private(&ifb->base); - drm_framebuffer_cleanup(&ifb->base); - if (ifb->obj) { - drm_gem_object_unreference_unlocked(&ifb->obj->base); - ifb->obj = NULL; - } + drm_framebuffer_unregister_private(&ifbdev->ifb.base); + intel_framebuffer_fini(&ifbdev->ifb); } int intel_fbdev_init(struct drm_device *dev) -- cgit v1.2.3 From 0b02e798ffec99b51f2fe931ceb61ca0d22d2a70 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:08 -0700 Subject: drm/i915: Improve VMA comments Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 290c12dd6e6b..4f93467fdfdd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -545,7 +545,12 @@ struct i915_hw_ppgtt { int (*enable)(struct drm_device *dev); 
}; -/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime +/** + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (ie. no refcounting), a VMA's lifetime * will always be <= an objects lifetime. So object refcounting should cover us. */ struct i915_vma { -- cgit v1.2.3 From 43387b37fa2d0f368142b8fa8c9440da92e5381b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 16 Jul 2013 09:12:04 +0200 Subject: drm/gem: create drm_gem_dumb_destroy All the gem based kms drivers really want the same function to destroy a dumb framebuffer backing storage object. So give it to them and roll it out in all drivers. This still leaves the option open for kms drivers which don't use GEM for backing storage, but it does decently simplify matters for gem drivers. Acked-by: Inki Dae Acked-by: Laurent Pinchart Cc: Intel Graphics Development Cc: Ben Skeggs Reviwed-by: Rob Clark Cc: Alex Deucher Acked-by: Patrik Jakobsson Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_drv.c | 2 +- drivers/gpu/drm/ast/ast_drv.h | 3 --- drivers/gpu/drm/ast/ast_main.c | 7 ------- drivers/gpu/drm/cirrus/cirrus_drv.c | 2 +- drivers/gpu/drm/cirrus/cirrus_drv.h | 3 --- drivers/gpu/drm/cirrus/cirrus_main.c | 7 ------- drivers/gpu/drm/drm_gem.c | 14 ++++++++++++++ drivers/gpu/drm/drm_gem_cma_helper.c | 10 ---------- drivers/gpu/drm/exynos/exynos_drm_drv.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_gem.c | 20 -------------------- drivers/gpu/drm/exynos/exynos_drm_gem.h | 9 --------- drivers/gpu/drm/gma500/gem.c | 17 ----------------- drivers/gpu/drm/gma500/psb_drv.c | 2 +- drivers/gpu/drm/gma500/psb_drv.h | 2 -- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 7 ------- drivers/gpu/drm/mgag200/mgag200_drv.c | 2 +- drivers/gpu/drm/mgag200/mgag200_drv.h | 3 --- drivers/gpu/drm/mgag200/mgag200_main.c | 7 ------- drivers/gpu/drm/nouveau/nouveau_display.c | 7 ------- drivers/gpu/drm/nouveau/nouveau_display.h | 2 -- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +- drivers/gpu/drm/omapdrm/omap_drv.h | 2 -- drivers/gpu/drm/omapdrm/omap_gem.c | 15 --------------- drivers/gpu/drm/qxl/qxl_drv.c | 2 +- drivers/gpu/drm/qxl/qxl_drv.h | 3 --- drivers/gpu/drm/qxl/qxl_dumb.c | 7 ------- drivers/gpu/drm/radeon/radeon.h | 3 --- drivers/gpu/drm/radeon/radeon_drv.c | 5 +---- drivers/gpu/drm/radeon/radeon_gem.c | 7 ------- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2 +- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2 +- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2 +- drivers/gpu/drm/udl/udl_drv.c | 2 +- drivers/gpu/drm/udl/udl_drv.h | 2 -- drivers/gpu/drm/udl/udl_gem.c | 6 ------ drivers/gpu/host1x/drm/drm.c | 2 +- drivers/gpu/host1x/drm/gem.c | 6 ------ drivers/gpu/host1x/drm/gem.h | 2 -- drivers/staging/imx-drm/imx-drm-core.c | 2 +- include/drm/drmP.h | 3 +++ include/drm/drm_gem_cma_helper.h | 8 -------- 44 files changed, 33 insertions(+), 186 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index df0d0a08097a..a144fb044852 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -216,7 +216,7 @@ static struct drm_driver driver = { .gem_free_object = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, - .dumb_destroy 
= ast_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, }; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 622d4ae7eb9e..796dbb212a41 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo) extern int ast_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -extern int ast_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle); extern int ast_gem_init_object(struct drm_gem_object *obj); extern void ast_gem_free_object(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index c195dc2abc09..7f6152d374ca 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file, return 0; } -int ast_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - int ast_gem_init_object(struct drm_gem_object *obj) { BUG(); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index 8ecb601152ef..d35d99c15f84 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -102,7 +102,7 @@ static struct drm_driver driver = { .gem_free_object = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, .dumb_map_offset = cirrus_dumb_mmap_offset, - .dumb_destroy = cirrus_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, }; static struct pci_driver cirrus_pci_driver = { diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index bae55609e6c3..9b0bb9184afd 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev, int cirrus_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -int cirrus_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle); int cirrus_framebuffer_init(struct drm_device *dev, struct cirrus_framebuffer *gfb, diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 3a7a0efe3675..f130a533a512 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file, return 0; } -int cirrus_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - int cirrus_gem_init_object(struct drm_gem_object *obj) { BUG(); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 1f7657286f04..9ab038c8dd5f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -243,6 +243,20 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) } EXPORT_SYMBOL(drm_gem_handle_delete); +/** + * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers + * + * This implements the ->dumb_destroy kms driver callback for drivers which use + * gem to manage their backing storage. + */ +int drm_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} +EXPORT_SYMBOL(drm_gem_dumb_destroy); + /** * Create a handle for this object. This adds a handle reference * to the object, which includes a regular reference count. 
Callers diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 847f09117666..0a4f80574eb4 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -281,16 +281,6 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) } EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); -/* - * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function - */ -int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, - struct drm_device *drm, unsigned int handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy); - #ifdef CONFIG_DEBUG_FS void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index ca2729a85129..21fc28ae5e6e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -271,7 +271,7 @@ static struct drm_driver exynos_drm_driver = { .gem_vm_ops = &exynos_drm_gem_vm_ops, .dumb_create = exynos_drm_gem_dumb_create, .dumb_map_offset = exynos_drm_gem_dumb_map_offset, - .dumb_destroy = exynos_drm_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = exynos_dmabuf_prime_export, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index be32db1ab290..b904633863e8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -717,26 +717,6 @@ unlock: return ret; } -int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - unsigned int handle) -{ - int ret; - - /* - * obj->refcount and obj->handle_count are decreased and - * if both them are 0 then exynos_drm_gem_free_object() - * would be called by callback to release resources. - */ - ret = drm_gem_handle_delete(file_priv, handle); - if (ret < 0) { - DRM_ERROR("failed to delete drm_gem_handle.\n"); - return ret; - } - - return 0; -} - int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 468766bee450..09555afdfe9c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); -/* - * destroy memory region allocated. - * - a gem handle and physical memory region pointed by a gem object - * would be released by drm_gem_handle_delete(). - */ -int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - unsigned int handle); - /* page fault handler and mmap fault address(virtual) to physical memory. 
*/ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 2f77bea30b11..10ae8c52d06f 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -161,23 +161,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return psb_gem_create(file, dev, args->size, &args->handle); } -/** - * psb_gem_dumb_destroy - destroy a dumb buffer - * @file: client file - * @dev: our DRM device - * @handle: the object handle - * - * Destroy a handle that was created via psb_gem_dumb_create, at least - * we hope it was created that way. i915 seems to assume the caller - * does the checking but that might be worth review ! FIXME - */ -int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle) -{ - /* No special work needed, drop the reference and see what falls out */ - return drm_gem_handle_delete(file, handle); -} - /** * psb_gem_fault - pagefault handler for GEM objects * @vma: the VMA of the GEM object diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index bddea5807442..ed06d5ce3757 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -652,7 +652,7 @@ static struct drm_driver driver = { .gem_vm_ops = &psb_gem_vm_ops, .dumb_create = psb_gem_dumb_create, .dumb_map_offset = psb_gem_dumb_map_gtt, - .dumb_destroy = psb_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .fops = &psb_gem_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 6053b8abcd12..984cacfcbaf2 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data, struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle); extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset); extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b07362f2675e..cca12db6dbb7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1163,7 +1163,7 @@ static struct drm_driver driver = { .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, - .dumb_destroy = i915_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .ioctls = i915_ioctls, .fops = &i915_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cef35d3ab37b..5cb3e4d34230 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1732,8 +1732,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); -int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle); /** * Returns true if seq1 is later than seq2. 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8673a000a373..2aa0894b59cd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -247,13 +247,6 @@ i915_gem_dumb_create(struct drm_file *file, args->size, &args->handle); } -int i915_gem_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - /** * Creates a new mm object and returns a handle to it. */ diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 122b571ccc7c..bd9196478735 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -104,7 +104,7 @@ static struct drm_driver driver = { .gem_free_object = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, - .dumb_destroy = mgag200_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, }; static struct pci_driver mgag200_pci_driver = { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 12e2499d9352..baaae19332e2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj); int mgag200_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -int mgag200_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle); void mgag200_gem_free_object(struct drm_gem_object *obj); int mgag200_dumb_mmap_offset(struct drm_file *file, diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 1a75ea395b33..0f8b861b10b3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file, return 0; } -int mgag200_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - int mgag200_gem_init_object(struct drm_gem_object *obj) { BUG(); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7a8caa126db6..c8ffba24720a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -680,13 +680,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, return ret; } -int -nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} - int nouveau_display_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 1ea3e4734b62..185e74132a6d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -68,8 +68,6 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *, struct drm_mode_create_dumb *args); int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, u32 handle, u64 *offset); -int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *, - u32 handle); void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index e990327d117a..a900cde497a6 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -715,7 +715,7 @@ driver = { .dumb_create = nouveau_display_dumb_create, .dumb_map_offset = nouveau_display_dumb_map_offset, - .dumb_destroy = nouveau_display_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index a3004f12b9a3..1ddd1a15764d 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -633,7 +633,7 @@ static struct drm_driver omap_drm_driver = { .gem_vm_ops = &omap_gem_vm_ops, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, - .dumb_destroy = omap_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .ioctls = ioctls, .num_ioctls = DRM_OMAP_NUM_IOCTLS, .fops = &omapdriver_fops, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 14f17da2ce25..f2ba425d80dd 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -225,8 +225,6 @@ int omap_gem_init_object(struct drm_gem_object *obj); void *omap_gem_vaddr(struct drm_gem_object *obj); int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset); -int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle); int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index f90531fc00c9..b1f19702550f 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -628,21 +628,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle); } -/** - * omap_gem_dumb_destroy - destroy a dumb buffer - * @file: client file - * @dev: our DRM device - * @handle: the object handle - * - * Destroy a handle that was created via omap_gem_dumb_create. 
- */ -int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle) -{ - /* No special work needed, drop the reference and see what falls out */ - return drm_gem_handle_delete(file, handle); -} - /** * omap_gem_dumb_map - buffer mapping for dumb interface * @file: our drm client file diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index df0b577a6608..48f2dfdeabcb 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -221,7 +221,7 @@ static struct drm_driver qxl_driver = { .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = qxl_mode_dumb_mmap, - .dumb_destroy = qxl_mode_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, .debugfs_cleanup = qxl_debugfs_takedown, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a3..57cb7a8e6fb5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -418,9 +418,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); int qxl_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int qxl_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); int qxl_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index 847c4ee798f7..d34bb4130ff0 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, return 0; } -int qxl_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} - int qxl_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..19066d1dcb7d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -492,9 +492,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, int radeon_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); -int radeon_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); /* * Semaphores. 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cb7f1a8c5a4a..4071fe70c416 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -118,9 +118,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int radeon_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, size_t size, @@ -421,7 +418,7 @@ static struct drm_driver kms_driver = { .gem_close_object = radeon_gem_object_close, .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, - .dumb_destroy = radeon_mode_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .fops = &radeon_driver_kms_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index aa796031ab65..dce99c8a5835 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, return 0; } -int radeon_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} - #if defined(CONFIG_DEBUG_FS) static int radeon_debugfs_gem_info(struct seq_file *m, void *data) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index dc0fe09b2ba1..5069d9c234bb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -258,7 +258,7 @@ static struct drm_driver rcar_du_driver = { .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = rcar_du_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .fops = &rcar_du_fops, .name = "rcar-du", .desc = "Renesas R-Car Display Unit", diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 5f83f9a3ef59..7f2ea1a5a45f 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_driver = { .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .fops = &shmob_drm_fops, .name = "shmob-drm", .desc = "Renesas SH Mobile DRM", diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 40b71da5a214..14801c2235ae 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -519,7 +519,7 @@ static struct drm_driver tilcdc_driver = { .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, #ifdef CONFIG_DEBUG_FS .debugfs_init = tilcdc_debugfs_init, .debugfs_cleanup = tilcdc_debugfs_cleanup, diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index c0770dbba74a..bb0af58c769a 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -84,7 +84,7 @@ static struct 
drm_driver driver = { .dumb_create = udl_dumb_create, .dumb_map_offset = udl_gem_mmap, - .dumb_destroy = udl_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .fops = &udl_driver_fops, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index cc6d90f28c71..56aec9409fa3 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); -int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle); int udl_gem_init_object(struct drm_gem_object *obj); void udl_gem_free_object(struct drm_gem_object *gem_obj); diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 2a4cb2f83b36..b5e3b8038253 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file, args->size, &args->handle); } -int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c index e184b00faacd..87aa09bf7ba8 100644 --- a/drivers/gpu/host1x/drm/drm.c +++ b/drivers/gpu/host1x/drm/drm.c @@ -633,7 +633,7 @@ struct drm_driver tegra_drm_driver = { .gem_vm_ops = &tegra_bo_vm_ops, .dumb_create = tegra_bo_dumb_create, .dumb_map_offset = tegra_bo_dumb_map_offset, - .dumb_destroy = tegra_bo_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .ioctls = tegra_drm_ioctls, .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c index bc323b3dbe4d..3c35622c9f15 100644 --- a/drivers/gpu/host1x/drm/gem.c +++ b/drivers/gpu/host1x/drm/gem.c @@ -261,9 +261,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) return ret; } - -int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm, - unsigned int handle) -{ - return drm_gem_handle_delete(file, handle); -} diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h index 34de2b486eb7..2e93b0379da8 100644 --- a/drivers/gpu/host1x/drm/gem.h +++ b/drivers/gpu/host1x/drm/gem.h @@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args); int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, uint32_t handle, uint64_t *offset); -int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm, - unsigned int handle); int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 9854a1daf606..a827858af944 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -797,7 +797,7 @@ static struct drm_driver imx_drm_driver = { .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .get_vblank_counter = drm_vblank_count, .enable_vblank = imx_drm_enable_vblank, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 4b518e05d293..a029dea13f48 100644 --- 
a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1548,6 +1548,9 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page ** extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); +int drm_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index c34f27f80bcc..89b4d7db1ebd 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, /* set vm_flags and we can change the vm attribute to other one at here. */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); -/* - * destroy memory region allocated. - * - a gem handle and physical memory region pointed by a gem object - * would be released by drm_gem_handle_delete(). - */ -int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, - struct drm_device *drm, unsigned int handle); - /* allocate physical memory. */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, unsigned int size); -- cgit v1.2.3 From 7fc65eb731cda8304865669166fb9a4c519bee69 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2013 09:59:46 +0100 Subject: drm: Apply kref_put_mutex() optimisations to drm_gem_object_unreference_unlocked() We can apply the same optimisation tricks as kref_put_mutex() in our local equivalent function. However, we have a different locking semantic (we unlock ourselves, in kref_put_mutex() the callee unlocks) so that we can use the same callbacks for both locked and unlocked kref_put()s and so can not simply convert to using kref_put_mutex() directly. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- include/drm/drmP.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index a029dea13f48..3b7fda557b8d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1629,10 +1629,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj) static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { - if (obj != NULL) { + if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) { struct drm_device *dev = obj->dev; + mutex_lock(&dev->struct_mutex); - kref_put(&obj->refcount, drm_gem_object_free); + if (likely(atomic_dec_and_test(&obj->refcount.refcount))) + drm_gem_object_free(&obj->refcount); mutex_unlock(&dev->struct_mutex); } } -- cgit v1.2.3 From 31e5d7c67bd492fd0b2988440e21e31809c7c9af Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 27 Jul 2013 13:36:27 +0200 Subject: drm/mm: add "best_match" flag to drm_mm_insert_node() Add a "best_match" flag similar to the drm_mm_search_*() helpers so we can convert TTM to use them in follow up patches. We can also inline the non-generic helpers and move them into the header to allow compile-time optimizations. To make calls to drm_mm_{search,insert}_node() more readable, this converts the boolean argument to a flagset. There are pending patches that add additional flags for top-down allocators and more. 
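To make the new interface concrete, here is a rough sketch of hypothetical caller code (not taken from this patch; the helper name and its arguments are invented for illustration) showing how the desired search policy is now named explicitly at the call site:

	/* Hypothetical wrapper, not from this patch: mm/node/size/alignment are
	 * whatever the driver already tracks for its address space. */
	static int foo_insert(struct drm_mm *mm, struct drm_mm_node *node,
			      unsigned long size, unsigned alignment, bool tight)
	{
		/* DRM_MM_SEARCH_DEFAULT takes the first hole that fits (the old
		 * behaviour); DRM_MM_SEARCH_BEST picks the smallest hole that
		 * still fits, which is what best_match = true used to request. */
		enum drm_mm_search_flags flags =
			tight ? DRM_MM_SEARCH_BEST : DRM_MM_SEARCH_DEFAULT;

		return drm_mm_insert_node(mm, node, size, alignment, flags);
	}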
v2: - use flag parameter instead of boolean "best_match" - convert *_search_free() helpers to also use flags argument Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 37 ++++++++--------------- drivers/gpu/drm/drm_vma_manager.c | 4 +-- drivers/gpu/drm/i915/i915_gem.c | 3 +- drivers/gpu/drm/i915/i915_gem_stolen.c | 12 +++++--- drivers/gpu/drm/sis/sis_mm.c | 6 ++-- drivers/gpu/drm/ttm/ttm_bo_manager.c | 3 +- drivers/gpu/drm/via/via_mm.c | 4 +-- include/drm/drm_mm.h | 54 ++++++++++++++++++++++------------ 8 files changed, 68 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index fe304f903b13..9a383272c8c7 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -212,12 +212,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic); */ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, unsigned long size, unsigned alignment, - unsigned long color) + unsigned long color, + enum drm_mm_search_flags flags) { struct drm_mm_node *hole_node; hole_node = drm_mm_search_free_generic(mm, size, alignment, - color, 0); + color, flags); if (!hole_node) return -ENOSPC; @@ -226,13 +227,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, } EXPORT_SYMBOL(drm_mm_insert_node_generic); -int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, unsigned alignment) -{ - return drm_mm_insert_node_generic(mm, node, size, alignment, 0); -} -EXPORT_SYMBOL(drm_mm_insert_node); - static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, struct drm_mm_node *node, unsigned long size, unsigned alignment, @@ -313,13 +307,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic); */ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, unsigned long size, unsigned alignment, unsigned long color, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, + enum drm_mm_search_flags flags) { struct drm_mm_node *hole_node; hole_node = drm_mm_search_free_in_range_generic(mm, size, alignment, color, - start, end, 0); + start, end, flags); if (!hole_node) return -ENOSPC; @@ -330,14 +325,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n } EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); -int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, unsigned alignment, - unsigned long start, unsigned long end) -{ - return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end); -} -EXPORT_SYMBOL(drm_mm_insert_node_in_range); - /** * Remove a memory node from the allocator. 
*/ @@ -413,7 +400,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, unsigned long size, unsigned alignment, unsigned long color, - bool best_match) + enum drm_mm_search_flags flags) { struct drm_mm_node *entry; struct drm_mm_node *best; @@ -436,7 +423,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; - if (!best_match) + if (!(flags & DRM_MM_SEARCH_BEST)) return entry; if (entry->size < best_size) { @@ -455,7 +442,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, unsigned long color, unsigned long start, unsigned long end, - bool best_match) + enum drm_mm_search_flags flags) { struct drm_mm_node *entry; struct drm_mm_node *best; @@ -483,7 +470,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; - if (!best_match) + if (!(flags & DRM_MM_SEARCH_BEST)) return entry; if (entry->size < best_size) { @@ -629,8 +616,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block); * corrupted. * * When the scan list is empty, the selected memory nodes can be freed. An - * immediately following drm_mm_search_free with best_match = 0 will then return - * the just freed block (because its at the top of the free_stack list). + * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then + * return the just freed block (because its at the top of the free_stack list). * * Returns one if this block should be evicted, zero otherwise. Will always * return zero when no hole has been found. diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index b966cea95f11..3837481d5607 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -241,8 +241,8 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, goto out_unlock; } - ret = drm_mm_insert_node_generic(&mgr->vm_addr_space_mm, - &node->vm_node, pages, 0, 0); + ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, + pages, 0, DRM_MM_SEARCH_DEFAULT); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2aa0894b59cd..ea2d83d7324e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3085,7 +3085,8 @@ search_free: ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, &obj->gtt_space, size, alignment, - obj->cache_level, 0, gtt_max); + obj->cache_level, 0, gtt_max, + DRM_MM_SEARCH_DEFAULT); if (ret) { ret = i915_gem_evict_something(dev, size, alignment, obj->cache_level, diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 55218332e625..e3551706f4ff 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -115,10 +115,12 @@ static int i915_setup_compression(struct drm_device *dev, int size) /* Try to over-allocate to reduce reallocations and fragmentation */ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size <<= 1, 4096, 0); + size <<= 1, 4096, + DRM_MM_SEARCH_DEFAULT); if (!compressed_fb) compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size >>= 1, 4096, 0); + size >>= 1, 4096, + DRM_MM_SEARCH_DEFAULT); if (compressed_fb) compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); if (!compressed_fb) @@ -130,7 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size) I915_WRITE(DPFC_CB_BASE, 
compressed_fb->start); } else { compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, - 4096, 4096, 0); + 4096, 4096, + DRM_MM_SEARCH_DEFAULT); if (compressed_llb) compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); @@ -328,7 +331,8 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) if (size == 0) return NULL; - stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); + stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, + DRM_MM_SEARCH_DEFAULT); if (stolen) stolen = drm_mm_get_block(stolen, size, 4096); if (stolen == NULL) diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 9a43d98e5003..23a234985941 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file, if (pool == AGP_TYPE) { retval = drm_mm_insert_node(&dev_priv->agp_mm, &item->mm_node, - mem->size, 0); + mem->size, 0, + DRM_MM_SEARCH_DEFAULT); offset = item->mm_node.start; } else { #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) @@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file, #else retval = drm_mm_insert_node(&dev_priv->vram_mm, &item->mm_node, - mem->size, 0); + mem->size, 0, + DRM_MM_SEARCH_DEFAULT); offset = item->mm_node.start; #endif } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index e4367f91472a..e4be29efba6b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -69,7 +69,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, spin_lock(&rman->lock); node = drm_mm_search_free_in_range(mm, mem->num_pages, mem->page_alignment, - placement->fpfn, lpfn, 1); + placement->fpfn, lpfn, + DRM_MM_SEARCH_BEST); if (unlikely(node == NULL)) { spin_unlock(&rman->lock); return 0; diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 0ab93ff09873..7e3ad87c366c 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data, if (mem->type == VIA_MEM_AGP) retval = drm_mm_insert_node(&dev_priv->agp_mm, &item->mm_node, - tmpSize, 0); + tmpSize, 0, DRM_MM_SEARCH_DEFAULT); else retval = drm_mm_insert_node(&dev_priv->vram_mm, &item->mm_node, - tmpSize, 0); + tmpSize, 0, DRM_MM_SEARCH_DEFAULT); if (retval) goto fail_alloc; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 98cb50ea6acb..439d1a17d3b1 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -44,6 +44,11 @@ #include #endif +enum drm_mm_search_flags { + DRM_MM_SEARCH_DEFAULT = 0, + DRM_MM_SEARCH_BEST = 1 << 0, +}; + struct drm_mm_node { struct list_head node_list; struct list_head hole_stack; @@ -189,28 +194,41 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range( start, end, 1); } -extern int drm_mm_insert_node(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long size, - unsigned alignment); -extern int drm_mm_insert_node_in_range(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long start, - unsigned long end); extern int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, unsigned long size, unsigned alignment, - unsigned long color); + unsigned long color, + enum drm_mm_search_flags flags); +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + unsigned long size, + unsigned alignment, + 
enum drm_mm_search_flags flags) +{ + return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags); +} + extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, unsigned long size, unsigned alignment, unsigned long color, unsigned long start, - unsigned long end); + unsigned long end, + enum drm_mm_search_flags flags); +static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end, + enum drm_mm_search_flags flags) +{ + return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, + 0, start, end, flags); +} + extern void drm_mm_put_block(struct drm_mm_node *cur); extern void drm_mm_remove_node(struct drm_mm_node *node); extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); @@ -218,7 +236,7 @@ extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, unsigned long size, unsigned alignment, unsigned long color, - bool best_match); + enum drm_mm_search_flags flags); extern struct drm_mm_node *drm_mm_search_free_in_range_generic( const struct drm_mm *mm, unsigned long size, @@ -226,13 +244,13 @@ extern struct drm_mm_node *drm_mm_search_free_in_range_generic( unsigned long color, unsigned long start, unsigned long end, - bool best_match); + enum drm_mm_search_flags flags); static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, - bool best_match) + enum drm_mm_search_flags flags) { - return drm_mm_search_free_generic(mm,size, alignment, 0, best_match); + return drm_mm_search_free_generic(mm,size, alignment, 0, flags); } static inline struct drm_mm_node *drm_mm_search_free_in_range( const struct drm_mm *mm, @@ -240,10 +258,10 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range( unsigned alignment, unsigned long start, unsigned long end, - bool best_match) + enum drm_mm_search_flags flags) { return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, - start, end, best_match); + start, end, flags); } extern void drm_mm_init(struct drm_mm *mm, -- cgit v1.2.3 From baa7094355a10b432bbccacb925da4bdac861c8d Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 2 Aug 2013 13:27:49 -0400 Subject: drm: const'ify ioctls table (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because, there is no reason for it not to be const. 
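As a side benefit (illustrated here with a hypothetical "foo" driver, not code from this patch), a const table can have its size taken at build time with ARRAY_SIZE(), so drivers no longer need to patch num_ioctls into the driver struct at init time:

	static const struct drm_ioctl_desc foo_ioctls[] = {
		/* FOO_CREATE and foo_create_ioctl are hypothetical */
		DRM_IOCTL_DEF_DRV(FOO_CREATE, foo_create_ioctl,
				  DRM_AUTH | DRM_UNLOCKED),
	};

	static struct drm_driver foo_driver = {
		/* ... */
		.ioctls     = foo_ioctls,
		.num_ioctls = ARRAY_SIZE(foo_ioctls),
	};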
v1: original v2: fix compile break in vmwgfx, and couple related cleanups suggested by Ville Syrjälä Signed-off-by: Rob Clark Reviewed-by: Ville Syrjälä Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/exynos/exynos_drm_drv.c | 4 ++-- drivers/gpu/drm/gma500/psb_drv.c | 2 +- drivers/gpu/drm/i810/i810_dma.c | 2 +- drivers/gpu/drm/i810/i810_drv.h | 2 +- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/mga/mga_drv.h | 2 +- drivers/gpu/drm/mga/mga_state.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++--- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_ioctl.c | 2 +- drivers/gpu/drm/r128/r128_drv.h | 2 +- drivers/gpu/drm/r128/r128_state.c | 2 +- drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/radeon_kms.c | 2 +- drivers/gpu/drm/savage/savage_bci.c | 2 +- drivers/gpu/drm/savage/savage_drv.h | 2 +- drivers/gpu/drm/sis/sis_drv.h | 2 +- drivers/gpu/drm/sis/sis_mm.c | 2 +- drivers/gpu/drm/via/via_dma.c | 2 +- drivers/gpu/drm/via/via_drv.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4 ++-- drivers/gpu/host1x/drm/drm.c | 2 +- drivers/staging/imx-drm/imx-drm-core.c | 2 +- include/drm/drmP.h | 2 +- 26 files changed, 29 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 21fc28ae5e6e..df81d3c959b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { .close = drm_gem_vm_close, }; -static struct drm_ioctl_desc exynos_ioctls[] = { +static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, @@ -277,6 +277,7 @@ static struct drm_driver exynos_drm_driver = { .gem_prime_export = exynos_dmabuf_prime_export, .gem_prime_import = exynos_dmabuf_prime_import, .ioctls = exynos_ioctls, + .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = { static int exynos_drm_platform_probe(struct platform_device *pdev) { pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls); return drm_platform_init(&exynos_drm_driver, pdev); } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index ed06d5ce3757..d13c2fc848bc 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data, static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -static struct drm_ioctl_desc psb_ioctls[] = { +static const struct drm_ioctl_desc psb_ioctls[] = { DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl, DRM_AUTH), diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index ada49eda489f..eac755bb8f9b 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -1241,7 +1241,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev) return 0; } -struct drm_ioctl_desc i810_ioctls[] = { +const struct drm_ioctl_desc i810_ioctls[] = { DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index 6e0acad9e0f5..d4d16eddd651 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev, extern int i810_driver_device_is_agp(struct drm_device *dev); extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -extern struct drm_ioctl_desc i810_ioctls[]; +extern const struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6ce903306320..78ad4dcc8e2f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1834,7 +1834,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) kfree(file_priv); } -struct drm_ioctl_desc i915_ioctls[] = { +const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5cb3e4d34230..3dd5731769fa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1566,7 +1566,7 @@ struct drm_i915_file_private { #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) -extern struct drm_ioctl_desc i915_ioctls[]; +extern const struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc __always_unused; extern int i915_panel_ignore_lid __read_mostly; diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index 54558a01969a..ca4bc54ea214 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h @@ -149,7 +149,7 @@ typedef struct drm_mga_private { unsigned int agp_size; } drm_mga_private_t; -extern struct drm_ioctl_desc mga_ioctls[]; +extern const struct drm_ioctl_desc mga_ioctls[]; extern int mga_max_ioctl; /* mga_dma.c */ diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 9c145143ad0f..37cc2fb4eadd 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -1083,7 +1083,7 @@ file_priv) return 0; } -struct drm_ioctl_desc mga_ioctls[] = { +const struct drm_ioctl_desc mga_ioctls[] = { DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a900cde497a6..2c2097af2378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -640,7 +640,7 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) nouveau_cli_destroy(cli); } -static struct drm_ioctl_desc +static const struct drm_ioctl_desc nouveau_ioctls[] = { DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -695,6 +695,7 @@ driver = { .disable_vblank = nouveau_drm_vblank_disable, .ioctls = 
nouveau_ioctls, + .num_ioctls = ARRAY_SIZE(nouveau_ioctls), .fops = &nouveau_driver_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -765,8 +766,6 @@ nouveau_drm_pci_driver = { static int __init nouveau_drm_init(void) { - driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls); - if (nouveau_modeset == -1) { #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 1ddd1a15764d..2f9e22e22bd4 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data, return ret; } -static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { +static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 57cb7a8e6fb5..afd09d48d72c 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -319,7 +319,7 @@ struct qxl_device { /* forward declaration for QXL_INFO_IO */ void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); -extern struct drm_ioctl_desc qxl_ioctls[]; +extern const struct drm_ioctl_desc qxl_ioctls[]; extern int qxl_max_ioctl; int qxl_driver_load(struct drm_device *dev, unsigned long flags); diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250d..6cd7273c0804 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -402,7 +402,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, return ret; } -struct drm_ioctl_desc qxl_ioctls[] = { +const struct drm_ioctl_desc qxl_ioctls[] = { DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 930c71b2fb5e..56eb5e3f5439 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h @@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv { drm_r128_freelist_t *list_entry; } drm_r128_buf_priv_t; -extern struct drm_ioctl_desc r128_ioctls[]; +extern const struct drm_ioctl_desc r128_ioctls[]; extern int r128_max_ioctl; /* r128_cce.c */ diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 19bb7e6f3d9a..01dd9aef9f0e 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev) r128_do_cleanup_cce(dev); } -struct drm_ioctl_desc r128_ioctls[] = { +const struct drm_ioctl_desc r128_ioctls[] = { DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4071fe70c416..fa7a7e13da6c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -109,7 +109,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); extern int 
radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos); -extern struct drm_ioctl_desc radeon_ioctls_kms[]; +extern const struct drm_ioctl_desc radeon_ioctls_kms[]; extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); int radeon_mode_dumb_mmap(struct drm_file *filp, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 07b023655bb4..866c2b70aa6f 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -722,7 +722,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms) KMS_INVALID_IOCTL(radeon_surface_free_kms) -struct drm_ioctl_desc radeon_ioctls_kms[] = { +const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index bd6b2cf508d5..b17d0710871a 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) drm_idlelock_release(&file_priv->master->lock); } -struct drm_ioctl_desc savage_ioctls[] = { +const struct drm_ioctl_desc savage_ioctls[] = { DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h index c05082a59f6f..335f8fcf1041 100644 --- a/drivers/gpu/drm/savage/savage_drv.h +++ b/drivers/gpu/drm/savage/savage_drv.h @@ -104,7 +104,7 @@ enum savage_family { S3_LAST }; -extern struct drm_ioctl_desc savage_ioctls[]; +extern const struct drm_ioctl_desc savage_ioctls[]; extern int savage_max_ioctl; #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h index 13b527bb83be..c31c0253054d 100644 --- a/drivers/gpu/drm/sis/sis_drv.h +++ b/drivers/gpu/drm/sis/sis_drv.h @@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); extern void sis_lastclose(struct drm_device *dev); -extern struct drm_ioctl_desc sis_ioctls[]; +extern const struct drm_ioctl_desc sis_ioctls[]; extern int sis_max_ioctl; #endif diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 23a234985941..01857d836350 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -350,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, return; } -struct drm_ioctl_desc sis_ioctls[] = { +const struct drm_ioctl_desc sis_ioctls[] = { DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 13558f5a2422..652f9b43ec9d 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c @@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * return ret; } -struct drm_ioctl_desc 
via_ioctls[] = { +const struct drm_ioctl_desc via_ioctls[] = { DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h index 893a65090c36..a811ef2b505f 100644 --- a/drivers/gpu/drm/via/via_drv.h +++ b/drivers/gpu/drm/via/via_drv.h @@ -114,7 +114,7 @@ enum via_family { #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) #define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val) -extern struct drm_ioctl_desc via_ioctls[]; +extern const struct drm_ioctl_desc via_ioctls[]; extern int via_max_ioctl; extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 78e21649d48a..50861504b5d9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -124,7 +124,7 @@ * Ioctl definitions. */ -static struct drm_ioctl_desc vmw_ioctls[] = { +static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, @@ -782,7 +782,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { - struct drm_ioctl_desc *ioctl = + const struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; if (unlikely(ioctl->cmd_drv != cmd)) { diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c index 87aa09bf7ba8..b128b90a94f6 100644 --- a/drivers/gpu/host1x/drm/drm.c +++ b/drivers/gpu/host1x/drm/drm.c @@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data, } #endif -static struct drm_ioctl_desc tegra_drm_ioctls[] = { +static const struct drm_ioctl_desc tegra_drm_ioctls[] = { #ifdef CONFIG_DRM_TEGRA_STAGING DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index a827858af944..a8900496b980 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -783,7 +783,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector) } EXPORT_SYMBOL_GPL(imx_drm_remove_connector); -static struct drm_ioctl_desc imx_drm_ioctls[] = { +static const struct drm_ioctl_desc imx_drm_ioctls[] = { /* none so far */ }; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 3b7fda557b8d..1a4eba627e79 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -965,7 +965,7 @@ struct drm_driver { u32 driver_features; int dev_priv_size; - struct drm_ioctl_desc *ioctls; + const struct drm_ioctl_desc *ioctls; int num_ioctls; const struct file_operations *fops; union { -- cgit v1.2.3 From 08fcd72b14e440feb748ddc33e7057716116a74a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 2 Aug 2013 14:09:24 +0300 Subject: drm: don't push static constants on stack for %*ph There is no need to pass constants via stack. The width may be explicitly specified in the format. 
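For example (an illustrative sketch mirroring the radeon hunk in this patch, not a hunk itself), both calls below hex-dump the same six status bytes, but the second encodes the constant width in the format string instead of pushing it on the stack as a separate argument:

	u8 link_status[6];

	/* old style: the width 6 is an extra vararg */
	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
	/* new style: the width is part of the format itself */
	DRM_DEBUG_KMS("link status %6ph\n", link_status);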
Signed-off-by: Andy Shevchenko Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2 +- drivers/gpu/drm/radeon/atombios_dp.c | 2 +- drivers/gpu/drm/udl/udl_main.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c index 31cc8fe8e7f0..054d9cff4f53 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c @@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay) if (ret) return ret; - DBG("status %*ph\n", 6, dp->stat); + DBG("status %6ph\n", dp->stat); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..c239739736db 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -550,7 +550,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, return false; } - DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); + DRM_DEBUG_KMS("link status %6ph\n", link_status); return true; } diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0ce2d7195256..f5ae57406f34 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev, total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); if (total_len > 5) { - DRM_INFO("vendor descriptor length:%x data:%*ph\n", - total_len, 11, desc); + DRM_INFO("vendor descriptor length:%x data:%11ph\n", + total_len, desc); if ((desc[0] != total_len) || /* descriptor length */ (desc[1] != 0x5f) || /* vendor descriptor type */ -- cgit v1.2.3 From 28ec711cd427f8b61f73712a43b8100ba8ca933b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 27 Jul 2013 16:37:00 +0200 Subject: drm/agp: move AGP cleanup paths to drm_agpsupport.c Introduce two new helpers, drm_agp_clear() and drm_agp_destroy() which clear all AGP mappings and destroy the AGP head. This allows to reduce the AGP code in core DRM and move it all to drm_agpsupport.c. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_agpsupport.c | 51 ++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_drv.c | 21 +---------------- drivers/gpu/drm/drm_pci.c | 12 ++++++++++ drivers/gpu/drm/drm_stub.c | 9 ++----- include/drm/drmP.h | 3 +++ 5 files changed, 69 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 3d8fed179797..e301d653d97e 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -423,6 +423,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) return head; } +/** + * drm_agp_clear - Clear AGP resource list + * @dev: DRM device + * + * Iterate over all AGP resources and remove them. But keep the AGP head + * intact so it can still be used. It is safe to call this if AGP is disabled or + * was already removed. + * + * If DRIVER_MODESET is active, nothing is done to protect the modesetting + * resources from getting destroyed. Drivers are responsible of cleaning them up + * during device shutdown. 
+ */ +void drm_agp_clear(struct drm_device *dev) +{ + struct drm_agp_mem *entry, *tempe; + + if (!drm_core_has_AGP(dev) || !dev->agp) + return; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { + if (entry->bound) + drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + kfree(entry); + } + INIT_LIST_HEAD(&dev->agp->memory); + + if (dev->agp->acquired) + drm_agp_release(dev); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; +} + +/** + * drm_agp_destroy - Destroy AGP head + * @dev: DRM device + * + * Destroy resources that were previously allocated via drm_agp_initp. Caller + * must ensure to clean up all AGP resources before calling this. See + * drm_agp_clear(). + * + * Call this to destroy AGP heads allocated via drm_agp_init(). + */ +void drm_agp_destroy(struct drm_agp_head *agp) +{ + kfree(agp); +} + /** * Binds a collection of pages into AGP memory at the given offset, returning * the AGP memory structure containing them. diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 36103d1660d1..dddd79988ffc 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -195,27 +195,8 @@ int drm_lastclose(struct drm_device * dev) mutex_lock(&dev->struct_mutex); - /* Clear AGP information */ - if (drm_core_has_AGP(dev) && dev->agp && - !drm_core_check_feature(dev, DRIVER_MODESET)) { - struct drm_agp_mem *entry, *tempe; - - /* Remove AGP resources, but leave dev->agp - intact until drv_cleanup is called. */ - list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { - if (entry->bound) - drm_unbind_agp(entry->memory); - drm_free_agp(entry->memory, entry->pages); - kfree(entry); - } - INIT_LIST_HEAD(&dev->agp->memory); - - if (dev->agp->acquired) - drm_agp_release(dev); + drm_agp_clear(dev); - dev->agp->acquired = 0; - dev->agp->enabled = 0; - } if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && !drm_core_check_feature(dev, DRIVER_MODESET)) { drm_sg_cleanup(dev->sg); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index a7b46ff80b0f..0f54ad8a9ced 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -283,6 +283,17 @@ static int drm_pci_agp_init(struct drm_device *dev) return 0; } +static void drm_pci_agp_destroy(struct drm_device *dev) +{ + if (drm_core_has_AGP(dev) && dev->agp) { + if (drm_core_has_MTRR(dev)) + arch_phys_wc_del(dev->agp->agp_mtrr); + drm_agp_clear(dev); + drm_agp_destroy(dev->agp); + dev->agp = NULL; + } +} + static struct drm_bus drm_pci_bus = { .bus_type = DRIVER_BUS_PCI, .get_irq = drm_pci_get_irq, @@ -291,6 +302,7 @@ static struct drm_bus drm_pci_bus = { .set_unique = drm_pci_set_unique, .irq_by_busid = drm_pci_irq_by_busid, .agp_init = drm_pci_agp_init, + .agp_destroy = drm_pci_agp_destroy, }; /** diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 327ca19cda85..d663f7d66dab 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -451,16 +451,11 @@ void drm_put_dev(struct drm_device *dev) drm_lastclose(dev); - if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp) - arch_phys_wc_del(dev->agp->agp_mtrr); - if (dev->driver->unload) dev->driver->unload(dev); - if (drm_core_has_AGP(dev) && dev->agp) { - kfree(dev->agp); - dev->agp = NULL; - } + if (dev->driver->bus->agp_destroy) + dev->driver->bus->agp_destroy(dev); drm_vblank_cleanup(dev); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1a4eba627e79..fba547368a20 100644 --- 
a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -736,6 +736,7 @@ struct drm_bus { int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); /* hooks that are for PCI */ int (*agp_init)(struct drm_device *dev); + void (*agp_destroy)(struct drm_device *dev); }; @@ -1453,6 +1454,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data, /* AGP/GART support (drm_agpsupport.h) */ extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); +extern void drm_agp_destroy(struct drm_agp_head *agp); +extern void drm_agp_clear(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From 78af329a85bee7dd4671c67abfecde37b0057b10 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 27 Jul 2013 13:37:59 +0200 Subject: drm/ttm: replace drm_mm_pre_get() by direct alloc Instead of calling drm_mm_pre_get() in a row, we now preallocate the node and then use the atomic insertion functions. This has the exact same semantics and there is no reason to use the racy pre-allocations. Note that ttm_bo_man_get_node() does not run in atomic context. Nouveau already uses GFP_KERNEL alloc in nouveau/nouveau_ttm.c in nouveau_gart_manager_new(). So we can do the same in ttm_bo_man_get_node(). Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo_manager.c | 42 +++++++++++++++++------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index e4be29efba6b..c58eba33bd5f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -61,29 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; - do { - ret = drm_mm_pre_get(mm); - if (unlikely(ret)) - return ret; - spin_lock(&rman->lock); - node = drm_mm_search_free_in_range(mm, - mem->num_pages, mem->page_alignment, - placement->fpfn, lpfn, - DRM_MM_SEARCH_BEST); - if (unlikely(node == NULL)) { - spin_unlock(&rman->lock); - return 0; - } - node = drm_mm_get_block_atomic_range(node, mem->num_pages, - mem->page_alignment, - placement->fpfn, - lpfn); - spin_unlock(&rman->lock); - } while (node == NULL); + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + spin_lock(&rman->lock); + ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages, + mem->page_alignment, + placement->fpfn, lpfn, + DRM_MM_SEARCH_BEST); + spin_unlock(&rman->lock); + + if (unlikely(ret)) { + kfree(node); + } else { + mem->mm_node = node; + mem->start = node->start; + } - mem->mm_node = node; - mem->start = node->start; return 0; } @@ -94,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, if (mem->mm_node) { spin_lock(&rman->lock); - drm_mm_put_block(mem->mm_node); + drm_mm_remove_node(mem->mm_node); spin_unlock(&rman->lock); + + kfree(mem->mm_node); mem->mm_node = NULL; } } -- cgit v1.2.3 From 06e78edff18195f8e416e6961fea7d88118a5c63 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 27 Jul 2013 16:21:27 +0200 Subject: drm/i915: pre-alloc instead of drm_mm search/get_block i915 is the last user of the weird search+get_block drm_mm API. Convert it to an explicit kmalloc()+insert_node(). This drops the last user of the node-cache in drm_mm. We can remove it now in a follow-up patch. 
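Condensed, the replacement pattern used here (and in the ttm_bo_manager patch
above) is a plain allocation followed by the regular insertion helper; a
trimmed sketch with simplified error handling, not the literal patch code:

	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, 4096,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(node);
		return ret;
	}

	/* node->start is now valid; teardown later pairs
	 * drm_mm_remove_node(node) with kfree(node). */
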
v2: - simplify error path in i915_setup_compression() v3: - simplify error path even more Cc: Chris Wilson Acked-by: Daniel Vetter Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_gem_stolen.c | 78 ++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index e3551706f4ff..a3d1a125b5e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -112,34 +112,36 @@ static int i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); + int ret; - /* Try to over-allocate to reduce reallocations and fragmentation */ - compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size <<= 1, 4096, - DRM_MM_SEARCH_DEFAULT); - if (!compressed_fb) - compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size >>= 1, 4096, - DRM_MM_SEARCH_DEFAULT); - if (compressed_fb) - compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); if (!compressed_fb) - goto err; + goto err_llb; + + /* Try to over-allocate to reduce reallocations and fragmentation */ + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + size >>= 1, 4096, + DRM_MM_SEARCH_DEFAULT); + if (ret) + goto err_llb; if (HAS_PCH_SPLIT(dev)) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { - compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, - 4096, 4096, - DRM_MM_SEARCH_DEFAULT); - if (compressed_llb) - compressed_llb = drm_mm_get_block(compressed_llb, - 4096, 4096); + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) goto err_fb; + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb, + 4096, 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) + goto err_fb; + dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, @@ -157,8 +159,10 @@ static int i915_setup_compression(struct drm_device *dev, int size) return 0; err_fb: - drm_mm_put_block(compressed_fb); -err: + kfree(compressed_llb); + drm_mm_remove_node(compressed_fb); +err_llb: + kfree(compressed_fb); pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } @@ -186,11 +190,15 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) if (dev_priv->fbc.size == 0) return; - if (dev_priv->fbc.compressed_fb) - drm_mm_put_block(dev_priv->fbc.compressed_fb); + if (dev_priv->fbc.compressed_fb) { + drm_mm_remove_node(dev_priv->fbc.compressed_fb); + kfree(dev_priv->fbc.compressed_fb); + } - if (dev_priv->fbc.compressed_llb) - drm_mm_put_block(dev_priv->fbc.compressed_llb); + if (dev_priv->fbc.compressed_llb) { + drm_mm_remove_node(dev_priv->fbc.compressed_llb); + kfree(dev_priv->fbc.compressed_llb); + } dev_priv->fbc.size = 0; } @@ -323,6 +331,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; + int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; @@ -331,18 +340,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) if (size == 0) return NULL; - stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, - DRM_MM_SEARCH_DEFAULT); - if (stolen) - stolen = drm_mm_get_block(stolen, size, 4096); - if (stolen == NULL) + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) return NULL; + ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size, + 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) { + kfree(stolen); + return NULL; + } + obj = _i915_gem_object_create_stolen(dev, stolen); if (obj) return obj; - drm_mm_put_block(stolen); + drm_mm_remove_node(stolen); + kfree(stolen); return NULL; } @@ -386,7 +400,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, obj = _i915_gem_object_create_stolen(dev, stolen); if (obj == NULL) { DRM_DEBUG_KMS("failed to allocate stolen object\n"); - drm_mm_put_block(stolen); + drm_mm_remove_node(stolen); + kfree(stolen); return NULL; } @@ -426,7 +441,8 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { if (obj->stolen) { - drm_mm_put_block(obj->stolen); + drm_mm_remove_node(obj->stolen); + kfree(obj->stolen); obj->stolen = NULL; } } -- cgit v1.2.3 From c700c67bae6698fbc6bd20e2ae5dc62ddd367b3b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sat, 27 Jul 2013 13:39:28 +0200 Subject: drm/mm: remove unused API We used to pre-allocate drm_mm nodes and save them in a linked list for later usage so we always have spare ones in atomic contexts. However, this is really racy if multiple threads are in an atomic context at the same time and we don't have enough spare nodes. Moreover, all remaining users run in user-context and just lock drm_mm with a spinlock. So we can easily preallocate the node, take the spinlock and insert the node. This may have worked well with BKL in place, however, with today's infrastructure it really doesn't make any sense. Besides, most users can easily embed drm_mm_node into their objects so no allocation is needed at all. Thus, remove the old pre-alloc API and all the helpers that it provides. Drivers have already been converted and we should not use the old API for new code, anymore. 
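For new code the preferred shape is to embed the node directly, so no separate
allocation is needed at all; a minimal sketch (the structure and function names
below are made up for illustration):

	struct my_buffer {
		struct drm_mm_node node;	/* embedded, no kzalloc needed */
		/* ... driver-private state ... */
	};

	/* caller holds whatever lock protects @mm */
	static int my_buffer_grab_range(struct drm_mm *mm, struct my_buffer *buf,
					unsigned long size, unsigned alignment)
	{
		return drm_mm_insert_node(mm, &buf->node, size, alignment,
					  DRM_MM_SEARCH_DEFAULT);
	}

	/* teardown, under the same lock: drm_mm_remove_node(&buf->node); */
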
Signed-off-by: David Herrmann Acked-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 160 ++++++----------------------------------------- include/drm/drm_mm.h | 95 ---------------------------- 2 files changed, 20 insertions(+), 235 deletions(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 9a383272c8c7..aded1e11e8ff 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -49,58 +49,18 @@ #define MM_UNUSED_TARGET 4 -static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) -{ - struct drm_mm_node *child; - - if (atomic) - child = kzalloc(sizeof(*child), GFP_ATOMIC); - else - child = kzalloc(sizeof(*child), GFP_KERNEL); - - if (unlikely(child == NULL)) { - spin_lock(&mm->unused_lock); - if (list_empty(&mm->unused_nodes)) - child = NULL; - else { - child = - list_entry(mm->unused_nodes.next, - struct drm_mm_node, node_list); - list_del(&child->node_list); - --mm->num_unused; - } - spin_unlock(&mm->unused_lock); - } - return child; -} - -/* drm_mm_pre_get() - pre allocate drm_mm_node structure - * drm_mm: memory manager struct we are pre-allocating for - * - * Returns 0 on success or -ENOMEM if allocation fails. - */ -int drm_mm_pre_get(struct drm_mm *mm) -{ - struct drm_mm_node *node; - - spin_lock(&mm->unused_lock); - while (mm->num_unused < MM_UNUSED_TARGET) { - spin_unlock(&mm->unused_lock); - node = kzalloc(sizeof(*node), GFP_KERNEL); - spin_lock(&mm->unused_lock); - - if (unlikely(node == NULL)) { - int ret = (mm->num_unused < 2) ? -ENOMEM : 0; - spin_unlock(&mm->unused_lock); - return ret; - } - ++mm->num_unused; - list_add_tail(&node->node_list, &mm->unused_nodes); - } - spin_unlock(&mm->unused_lock); - return 0; -} -EXPORT_SYMBOL(drm_mm_pre_get); +static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + enum drm_mm_search_flags flags); +static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + unsigned long start, + unsigned long end, + enum drm_mm_search_flags flags); static void drm_mm_insert_helper(struct drm_mm_node *hole_node, struct drm_mm_node *node, @@ -187,24 +147,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) } EXPORT_SYMBOL(drm_mm_reserve_node); -struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, - unsigned long size, - unsigned alignment, - unsigned long color, - int atomic) -{ - struct drm_mm_node *node; - - node = drm_mm_kmalloc(hole_node->mm, atomic); - if (unlikely(node == NULL)) - return NULL; - - drm_mm_insert_helper(hole_node, node, size, alignment, color); - - return node; -} -EXPORT_SYMBOL(drm_mm_get_block_generic); - /** * Search for free space and insert a preallocated memory node. Returns * -ENOSPC if no suitable free area is available. 
The preallocated memory node @@ -279,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, } } -struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end, - int atomic) -{ - struct drm_mm_node *node; - - node = drm_mm_kmalloc(hole_node->mm, atomic); - if (unlikely(node == NULL)) - return NULL; - - drm_mm_insert_helper_range(hole_node, node, size, alignment, color, - start, end); - - return node; -} -EXPORT_SYMBOL(drm_mm_get_block_range_generic); - /** * Search for free space and insert a preallocated memory node. Returns * -ENOSPC if no suitable free area is available. This is for range @@ -359,28 +280,6 @@ void drm_mm_remove_node(struct drm_mm_node *node) } EXPORT_SYMBOL(drm_mm_remove_node); -/* - * Remove a memory node from the allocator and free the allocated struct - * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the - * drm_mm_get_block functions. - */ -void drm_mm_put_block(struct drm_mm_node *node) -{ - - struct drm_mm *mm = node->mm; - - drm_mm_remove_node(node); - - spin_lock(&mm->unused_lock); - if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&node->node_list, &mm->unused_nodes); - ++mm->num_unused; - } else - kfree(node); - spin_unlock(&mm->unused_lock); -} -EXPORT_SYMBOL(drm_mm_put_block); - static int check_free_hole(unsigned long start, unsigned long end, unsigned long size, unsigned alignment) { @@ -396,11 +295,11 @@ static int check_free_hole(unsigned long start, unsigned long end, return end >= start + size; } -struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags flags) +static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + enum drm_mm_search_flags flags) { struct drm_mm_node *entry; struct drm_mm_node *best; @@ -434,9 +333,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, return best; } -EXPORT_SYMBOL(drm_mm_search_free_generic); -struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, +static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, unsigned long size, unsigned alignment, unsigned long color, @@ -481,7 +379,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, return best; } -EXPORT_SYMBOL(drm_mm_search_free_in_range_generic); /** * Moves an allocation. To be used with embedded struct drm_mm_node. @@ -654,10 +551,7 @@ EXPORT_SYMBOL(drm_mm_clean); void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->hole_stack); - INIT_LIST_HEAD(&mm->unused_nodes); - mm->num_unused = 0; mm->scanned_blocks = 0; - spin_lock_init(&mm->unused_lock); /* Clever trick to avoid a special case in the free hole tracking. */ INIT_LIST_HEAD(&mm->head_node.node_list); @@ -677,22 +571,8 @@ EXPORT_SYMBOL(drm_mm_init); void drm_mm_takedown(struct drm_mm * mm) { - struct drm_mm_node *entry, *next; - - if (WARN(!list_empty(&mm->head_node.node_list), - "Memory manager not clean. 
Delaying takedown\n")) { - return; - } - - spin_lock(&mm->unused_lock); - list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) { - list_del(&entry->node_list); - kfree(entry); - --mm->num_unused; - } - spin_unlock(&mm->unused_lock); - - BUG_ON(mm->num_unused != 0); + WARN(!list_empty(&mm->head_node.node_list), + "Memory manager not clean during takedown.\n"); } EXPORT_SYMBOL(drm_mm_takedown); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 439d1a17d3b1..cba67865d18f 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -70,9 +70,6 @@ struct drm_mm { /* head_node.node_list is the list of all memory nodes, ordered * according to the (increasing) start address of the memory node. */ struct drm_mm_node head_node; - struct list_head unused_nodes; - int num_unused; - spinlock_t unused_lock; unsigned int scan_check_range : 1; unsigned scan_alignment; unsigned long scan_color; @@ -123,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ &(mm)->head_node.node_list, \ node_list) -#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ - for (entry = (mm)->prev_scanned_node, \ - next = entry ? list_entry(entry->node_list.next, \ - struct drm_mm_node, node_list) : NULL; \ - entry != NULL; entry = next, \ - next = entry ? list_entry(entry->node_list.next, \ - struct drm_mm_node, node_list) : NULL) \ /* Note that we need to unroll list_for_each_entry in order to inline * setting hole_start and hole_end on each iteration and keep the @@ -147,52 +137,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) * Basic range manager support (drm_mm.c) */ extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); -extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long color, - int atomic); -extern struct drm_mm_node *drm_mm_get_block_range_generic( - struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end, - int atomic); - -static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, - unsigned long size, - unsigned alignment) -{ - return drm_mm_get_block_generic(parent, size, alignment, 0, 0); -} -static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent, - unsigned long size, - unsigned alignment) -{ - return drm_mm_get_block_generic(parent, size, alignment, 0, 1); -} -static inline struct drm_mm_node *drm_mm_get_block_range( - struct drm_mm_node *parent, - unsigned long size, - unsigned alignment, - unsigned long start, - unsigned long end) -{ - return drm_mm_get_block_range_generic(parent, size, alignment, 0, - start, end, 0); -} -static inline struct drm_mm_node *drm_mm_get_block_atomic_range( - struct drm_mm_node *parent, - unsigned long size, - unsigned alignment, - unsigned long start, - unsigned long end) -{ - return drm_mm_get_block_range_generic(parent, size, alignment, 0, - start, end, 1); -} extern int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, @@ -229,52 +173,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, 0, start, end, flags); } -extern void drm_mm_put_block(struct drm_mm_node *cur); extern void drm_mm_remove_node(struct drm_mm_node *node); extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -extern struct 
drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags flags); -extern struct drm_mm_node *drm_mm_search_free_in_range_generic( - const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end, - enum drm_mm_search_flags flags); -static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - enum drm_mm_search_flags flags) -{ - return drm_mm_search_free_generic(mm,size, alignment, 0, flags); -} -static inline struct drm_mm_node *drm_mm_search_free_in_range( - const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long start, - unsigned long end, - enum drm_mm_search_flags flags) -{ - return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, - start, end, flags); -} - extern void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); extern void drm_mm_takedown(struct drm_mm *mm); extern int drm_mm_clean(struct drm_mm *mm); -extern int drm_mm_pre_get(struct drm_mm *mm); - -static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) -{ - return block->mm; -} void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, -- cgit v1.2.3 From abf190351b49937335130970a99a0b4275402b5e Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Thu, 25 Jul 2013 14:08:51 +0200 Subject: drm/ttm: inline drm_bo_setup_vm() This helper is used only once and just wraps a call to drm_vma_offset_add(). Remove this unneeded indirection to safe 10 lines of code. Signed-off-by: David Herrmann Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 050edfaf5b88..f1a857ec1021 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -45,7 +45,6 @@ #define TTM_DEBUG(fmt, arg...) #define TTM_BO_HASH_ORDER 13 -static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static void ttm_bo_global_kobj_release(struct kobject *kobj); @@ -1134,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev, if (likely(!ret) && (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg)) - ret = ttm_bo_setup_vm(bo); + ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, + bo->mem.num_pages); locked = ww_mutex_trylock(&bo->resv->lock); WARN_ON(!locked); @@ -1506,24 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unmap_virtual); -/** - * ttm_bo_setup_vm: - * - * @bo: the buffer to allocate address space for - * - * Allocate address space in the drm device so that applications - * can mmap the buffer and access the contents. This only - * applies to ttm_bo_type_device objects as others are not - * placed in the drm device address space. 
- */ - -static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) -{ - struct ttm_bo_device *bdev = bo->bdev; - - return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, - bo->mem.num_pages); -} int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) -- cgit v1.2.3 From 80dcfdbd68b094f21f7ce222fb8039123f5b4cbe Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:01 -0700 Subject: drm/i915: Rework __i915_gem_shrink In order to do this for all VMs, it's convenient to rework the logic a bit. This should have no functional impact. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3debb35b7195..4ca8f9fad913 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1707,9 +1707,14 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, } list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) { - if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && - i915_gem_object_unbind(obj) == 0 && - i915_gem_object_put_pages(obj) == 0) { + + if (!i915_gem_object_is_purgeable(obj) && purgeable_only) + continue; + + if (i915_gem_object_unbind(obj)) + continue; + + if (!i915_gem_object_put_pages(obj)) { count += obj->base.size >> PAGE_SHIFT; if (count >= target) return count; -- cgit v1.2.3 From 07fe0b12800d4752d729d4122c01f41f80a5ba5a Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:10 -0700 Subject: drm/i915: plumb VM into bind/unbind code As alluded to in several patches, and it will be reiterated later... A VMA is an abstraction for a GEM BO bound into an address space. Therefore it stands to reason, that the existing bind, and unbind are the ones which will be the most impacted. This patch implements this, and updates all callers which weren't already updated in the series (because it was too messy). This patch represents the bulk of an earlier, larger patch. I've pulled out a bunch of things by the request of Daniel. The history is preserved for posterity with the email convention of ">" One big change from the original patch aside from a bunch of cropping is I've created an i915_vma_unbind() function. That is because we always have the VMA anyway, and doing an extra lookup is useful. There is a caveat, we retain an i915_gem_object_ggtt_unbind, for the global cases which might not talk in VMAs. > drm/i915: plumb VM into object operations > > This patch was formerly known as: > "drm/i915: Create VMAs (part 3) - plumbing" > > This patch adds a VM argument, bind/unbind, and the object > offset/size/color getters/setters. It preserves the old ggtt helper > functions because things still need, and will continue to need them. > > Some code will still need to be ported over after this. > > v2: Fix purge to pick an object and unbind all vmas > This was doable because of the global bound list change. > > v3: With the commit to actually pin/unpin pages in place, there is no > longer a need to check if unbind succeeded before calling put_pages(). > Make put_pages only BUG() after checking pin count. > > v4: Rebased on top of the new hangcheck work by Mika > plumbed eb_destroy also > Many checkpatch related fixes > > v5: Very large rebase > > v6: > Change BUG_ON to WARN_ON (Daniel) > Rename vm to ggtt in preallocate stolen, since it is always ggtt when > dealing with stolen memory. 
(Daniel) > list_for_each will short-circuit already (Daniel) > remove superflous space (Daniel) > Use per object list of vmas (Daniel) > Make obj_bound_any() use obj_bound for each vm (Ben) > s/bind_to_gtt/bind_to_vm/ (Ben) > > Fixed up the inactive shrinker. As Daniel noticed the code could > potentially count the same object multiple times. While it's not > possible in the current case, since 1 object can only ever be bound into > 1 address space thus far - we may as well try to get something more > future proof in place now. With a prep patch before this to switch over > to using the bound list + inactive check, we're now able to carry that > forward for every address space an object is bound into. Signed-off-by: Ben Widawsky [danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA in destroy".] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 134 +++++++++++++++++++---------- drivers/gpu/drm/i915/i915_gem_evict.c | 4 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 9 +- drivers/gpu/drm/i915/i915_trace.h | 37 ++++---- 7 files changed, 120 insertions(+), 71 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 748af58b0cea..d2935b4fd695 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1800,7 +1800,7 @@ i915_drop_caches_set(void *data, u64 val) if (obj->pin_count) continue; - ret = i915_gem_object_unbind(obj); + ret = i915_gem_object_ggtt_unbind(obj); if (ret) goto unlock; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4f93467fdfdd..8205b4b4f2be 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1737,7 +1737,8 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, bool map_and_fenceable, bool nonblocking); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); -int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int __must_check i915_vma_unbind(struct i915_vma *vma); +int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4ca8f9fad913..db9792c47827 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -38,10 +38,12 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); -static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, - unsigned alignment, - bool map_and_fenceable, - bool nonblocking); +static __must_check int +i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -1692,7 +1694,6 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, bool purgeable_only) { struct drm_i915_gem_object *obj, *next; - struct i915_address_space *vm = &dev_priv->gtt.base; long count = 0; list_for_each_entry_safe(obj, next, @@ -1706,13 
+1707,16 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, } } - list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) { + list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, + global_list) { + struct i915_vma *vma, *v; if (!i915_gem_object_is_purgeable(obj) && purgeable_only) continue; - if (i915_gem_object_unbind(obj)) - continue; + list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) + if (i915_vma_unbind(vma)) + break; if (!i915_gem_object_put_pages(obj)) { count += obj->base.size >> PAGE_SHIFT; @@ -2596,17 +2600,13 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) old_write_domain); } -/** - * Unbinds an object from the GTT aperture. - */ -int -i915_gem_object_unbind(struct drm_i915_gem_object *obj) +int i915_vma_unbind(struct i915_vma *vma) { + struct drm_i915_gem_object *obj = vma->obj; drm_i915_private_t *dev_priv = obj->base.dev->dev_private; - struct i915_vma *vma; int ret; - if (!i915_gem_obj_ggtt_bound(obj)) + if (list_empty(&vma->vma_link)) return 0; if (obj->pin_count) @@ -2629,7 +2629,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (ret) return ret; - trace_i915_gem_object_unbind(obj); + trace_i915_vma_unbind(vma); if (obj->has_global_gtt_mapping) i915_gem_gtt_unbind_object(obj); @@ -2644,7 +2644,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) /* Avoid an unnecessary call to unbind on rebind. */ obj->map_and_fenceable = true; - vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); list_del(&vma->vma_link); drm_mm_remove_node(&vma->node); i915_gem_vma_destroy(vma); @@ -2659,6 +2658,26 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) return 0; } +/** + * Unbinds an object from the global GTT aperture. + */ +int +i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct i915_address_space *ggtt = &dev_priv->gtt.base; + + if (!i915_gem_obj_ggtt_bound(obj)); + return 0; + + if (obj->pin_count) + return -EBUSY; + + BUG_ON(obj->pages == NULL); + + return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); +} + int i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -3076,18 +3095,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev) * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, - unsigned alignment, - bool map_and_fenceable, - bool nonblocking) +i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; - size_t gtt_max = map_and_fenceable ? - dev_priv->gtt.mappable_end : dev_priv->gtt.base.total; + size_t gtt_max = + map_and_fenceable ? 
dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; int ret; @@ -3132,15 +3151,18 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); + /* FIXME: For now we only ever use 1 VMA per object */ + BUG_ON(!i915_is_ggtt(vm)); + WARN_ON(!list_empty(&obj->vma_list)); + + vma = i915_gem_vma_create(obj, vm); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_unpin; } search_free: - ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, - &vma->node, + ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, obj->cache_level, 0, gtt_max); if (ret) { @@ -3165,18 +3187,25 @@ search_free: list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &vm->inactive_list); - list_add(&vma->vma_link, &obj->vma_list); + + /* Keep GGTT vmas first to make debug easier */ + if (i915_is_ggtt(vm)) + list_add(&vma->vma_link, &obj->vma_list); + else + list_add_tail(&vma->vma_link, &obj->vma_list); fenceable = + i915_is_ggtt(vm) && i915_gem_obj_ggtt_size(obj) == fence_size && (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0; - mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <= - dev_priv->gtt.mappable_end; + mappable = + i915_is_ggtt(vm) && + vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end; obj->map_and_fenceable = mappable && fenceable; - trace_i915_gem_object_bind(obj, map_and_fenceable); + trace_i915_vma_bind(vma, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; @@ -3345,7 +3374,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, list_for_each_entry(vma, &obj->vma_list, vma_link) { if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(vma); if (ret) return ret; @@ -3653,33 +3682,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, bool map_and_fenceable, bool nonblocking) { + struct i915_vma *vma; int ret; if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if (i915_gem_obj_ggtt_bound(obj)) { - if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) || + WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); + + vma = i915_gem_obj_to_vma(obj, vm); + + if (vma) { + if ((alignment && + vma->node.start & (alignment - 1)) || (map_and_fenceable && !obj->map_and_fenceable)) { WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - i915_gem_obj_ggtt_offset(obj), alignment, + i915_gem_obj_offset(obj, vm), alignment, map_and_fenceable, obj->map_and_fenceable); - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(vma); if (ret) return ret; } } - if (!i915_gem_obj_ggtt_bound(obj)) { + if (!i915_gem_obj_bound(obj, vm)) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - ret = i915_gem_object_bind_to_gtt(obj, alignment, - map_and_fenceable, - nonblocking); + ret = i915_gem_object_bind_to_vm(obj, vm, alignment, + map_and_fenceable, + nonblocking); if (ret) return ret; @@ -3975,6 +4010,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_vma *vma, *next; trace_i915_gem_object_destroy(obj); @@ -3982,15 +4018,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) 
i915_gem_detach_phys_object(dev, obj); obj->pin_count = 0; - if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) { - bool was_interruptible; + /* NB: 0 or 1 elements */ + WARN_ON(!list_empty(&obj->vma_list) && + !list_is_singular(&obj->vma_list)); + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { + int ret = i915_vma_unbind(vma); + if (WARN_ON(ret == -ERESTARTSYS)) { + bool was_interruptible; - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; - WARN_ON(i915_gem_object_unbind(obj)); + WARN_ON(i915_vma_unbind(vma)); - dev_priv->mm.interruptible = was_interruptible; + dev_priv->mm.interruptible = was_interruptible; + } } /* Stolen objects don't hold a ref, but do hold pin count. Fix that up diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 33d85a4447a6..9205a4179b7e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -147,7 +147,7 @@ found: struct drm_i915_gem_object, exec_list); if (ret == 0) - ret = i915_gem_object_unbind(obj); + ret = i915_gem_object_ggtt_unbind(obj); list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); @@ -185,7 +185,7 @@ i915_gem_evict_everything(struct drm_device *dev) /* Having flushed everything, unbind() should never raise an error */ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) if (obj->pin_count == 0) - WARN_ON(i915_gem_object_unbind(obj)); + WARN_ON(i915_gem_object_ggtt_unbind(obj)); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 9939d2ef3ea9..17be2e4bae6b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -556,7 +556,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, if ((entry->alignment && obj_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); else ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 92a8d279ca39..032e9ef9c896 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -360,17 +360,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj->map_and_fenceable = !i915_gem_obj_ggtt_bound(obj) || - (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end && + (i915_gem_obj_ggtt_offset(obj) + + obj->base.size <= dev_priv->gtt.mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); /* Rebind if we need a change of alignment */ if (!obj->map_and_fenceable) { - u32 unfenced_alignment = + u32 unfenced_align = i915_gem_get_gtt_alignment(dev, obj->base.size, args->tiling_mode, false); - if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1)) - ret = i915_gem_object_unbind(obj); + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) + ret = i915_gem_object_ggtt_unbind(obj); } if (ret == 0) { diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 2933e2ffeaa4..e2c5ee6f6194 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create, TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) ); 
-TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), - TP_ARGS(obj, mappable), +TRACE_EVENT(i915_vma_bind, + TP_PROTO(struct i915_vma *vma, bool mappable), + TP_ARGS(vma, mappable), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) + __field(struct i915_address_space *, vm) __field(u32, offset) __field(u32, size) __field(bool, mappable) ), TP_fast_assign( - __entry->obj = obj; - __entry->offset = i915_gem_obj_ggtt_offset(obj); - __entry->size = i915_gem_obj_ggtt_size(obj); + __entry->obj = vma->obj; + __entry->vm = vma->vm; + __entry->offset = vma->node.start; + __entry->size = vma->node.size; __entry->mappable = mappable; ), - TP_printk("obj=%p, offset=%08x size=%x%s", + TP_printk("obj=%p, offset=%08x size=%x%s vm=%p", __entry->obj, __entry->offset, __entry->size, - __entry->mappable ? ", mappable" : "") + __entry->mappable ? ", mappable" : "", + __entry->vm) ); -TRACE_EVENT(i915_gem_object_unbind, - TP_PROTO(struct drm_i915_gem_object *obj), - TP_ARGS(obj), +TRACE_EVENT(i915_vma_unbind, + TP_PROTO(struct i915_vma *vma), + TP_ARGS(vma), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) + __field(struct i915_address_space *, vm) __field(u32, offset) __field(u32, size) ), TP_fast_assign( - __entry->obj = obj; - __entry->offset = i915_gem_obj_ggtt_offset(obj); - __entry->size = i915_gem_obj_ggtt_size(obj); + __entry->obj = vma->obj; + __entry->vm = vma->vm; + __entry->offset = vma->node.start; + __entry->size = vma->node.size; ), - TP_printk("obj=%p, offset=%08x size=%x", - __entry->obj, __entry->offset, __entry->size) + TP_printk("obj=%p, offset=%08x size=%x vm=%p", + __entry->obj, __entry->offset, __entry->size, __entry->vm) ); TRACE_EVENT(i915_gem_object_change_domain, -- cgit v1.2.3 From f6cd1f15d345688cb95cc195aaf8b375f7de8cf6 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:11 -0700 Subject: drm/i915: Use new bind/unbind in eviction code Eviction code, like the rest of the converted code needs to be aware of the address space for which it is evicting (or the everything case, all addresses). With the updated bind/unbind interfaces of the last patch, we can now safely move the eviction code over. 
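Put together with the previous patch, the bind path now reads roughly like
this, with eviction scoped to the same address space the allocation came from
(condensed sketch; see the i915_gem.c hunk below for the actual change):

	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
						  obj->cache_level, 0, gtt_max);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       obj->cache_level,
					       map_and_fenceable,
					       nonblocking);
		if (ret == 0)
			goto search_free;	/* retry the insertion */
	}
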
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 53 +++++++++++++++++++---------------- 3 files changed, 33 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8205b4b4f2be..2421ad17831b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1983,7 +1983,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) /* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, +int __must_check i915_gem_evict_something(struct drm_device *dev, + struct i915_address_space *vm, + int min_size, unsigned alignment, unsigned cache_level, bool mappable, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index db9792c47827..6c8c6b6b91ca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3166,7 +3166,7 @@ search_free: size, alignment, obj->cache_level, 0, gtt_max); if (ret) { - ret = i915_gem_evict_something(dev, size, alignment, + ret = i915_gem_evict_something(dev, vm, size, alignment, obj->cache_level, map_and_fenceable, nonblocking); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 9205a4179b7e..61bf5e20e5e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -32,26 +32,21 @@ #include "i915_trace.h" static bool -mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) +mark_free(struct i915_vma *vma, struct list_head *unwind) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); - - if (obj->pin_count) + if (vma->obj->pin_count) return false; - list_add(&obj->exec_list, unwind); + list_add(&vma->obj->exec_list, unwind); return drm_mm_scan_add_block(&vma->node); } int -i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, unsigned cache_level, +i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, + int min_size, unsigned alignment, unsigned cache_level, bool mappable, bool nonblocking) { drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; struct list_head eviction_list, unwind_list; struct i915_vma *vma; struct drm_i915_gem_object *obj; @@ -83,16 +78,18 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, */ INIT_LIST_HEAD(&unwind_list); - if (mappable) + if (mappable) { + BUG_ON(!i915_is_ggtt(vm)); drm_mm_init_scan_with_range(&vm->mm, min_size, alignment, cache_level, 0, dev_priv->gtt.mappable_end); - else + } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj, &vm->inactive_list, mm_list) { - if (mark_free(obj, &unwind_list)) + struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); + if (mark_free(vma, &unwind_list)) goto found; } @@ -101,7 +98,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, /* Now merge in the soon-to-be-expired objects... 
*/ list_for_each_entry(obj, &vm->active_list, mm_list) { - if (mark_free(obj, &unwind_list)) + struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); + if (mark_free(vma, &unwind_list)) goto found; } @@ -111,7 +109,7 @@ none: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); + vma = i915_gem_obj_to_vma(obj, vm); ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); @@ -132,7 +130,7 @@ found: obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); - vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); + vma = i915_gem_obj_to_vma(obj, vm); if (drm_mm_scan_remove_block(&vma->node)) { list_move(&obj->exec_list, &eviction_list); drm_gem_object_reference(&obj->base); @@ -147,7 +145,7 @@ found: struct drm_i915_gem_object, exec_list); if (ret == 0) - ret = i915_gem_object_ggtt_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); @@ -160,13 +158,18 @@ int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_address_space *vm; struct drm_i915_gem_object *obj, *next; - bool lists_empty; + bool lists_empty = true; int ret; - lists_empty = (list_empty(&vm->inactive_list) && - list_empty(&vm->active_list)); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + lists_empty = (list_empty(&vm->inactive_list) && + list_empty(&vm->active_list)); + if (!lists_empty) + lists_empty = false; + } + if (lists_empty) return -ENOSPC; @@ -183,9 +186,11 @@ i915_gem_evict_everything(struct drm_device *dev) i915_gem_retire_requests(dev); /* Having flushed everything, unbind() should never raise an error */ - list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) - if (obj->pin_count == 0) - WARN_ON(i915_gem_object_ggtt_unbind(obj)); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) + if (obj->pin_count == 0) + WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm))); + } return 0; } -- cgit v1.2.3 From 9843877d10700d6b64b615e0e8724fc9f6ff6268 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:12 -0700 Subject: drm/i915: turn bound_ggtt checks to bound_any In some places, we want to know if an object is bound in any address space, and not just the global GTT. This often applies when there is a single global resource (object, pages, etc.) function | reason -------------------------------------------------- i915_gem_object_is_inactive | global object i915_gem_object_put_pages | object's pages 915_gem_object_unpin | global object i915_gem_execbuffer_unreserve_object | temporary until we plumb vma pread/pwrite | see the note below Note: set_to_gtt_domain in pwrite/pread is abused as a wait_rendering call - but that once only worked if the object is bound. We really should replace this with a plain wait_rendering call, which would have the upside that in pread it would be clearer that we actually only wait for oustanding gpu writes. Signed-off-by: Ben Widawsky [danvet: Explain the set_to_gtt_domain in pwrite/pread and volunteer Ben to replace those with wait_rendering calls.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 12 ++++++------ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6c8c6b6b91ca..a51731e9233b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -141,7 +141,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return i915_gem_obj_ggtt_bound(obj) && !obj->active; + return i915_gem_obj_bound_any(obj) && !obj->active; } int @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev, * anyway again before the next pread happens. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush = 1; - if (i915_gem_obj_ggtt_bound(obj)) { + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) return ret; @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * right away and we therefore have to clflush anyway. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush_after = 1; - if (i915_gem_obj_ggtt_bound(obj)) { + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; @@ -1673,7 +1673,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages_pin_count) return -EBUSY; - BUG_ON(i915_gem_obj_ggtt_bound(obj)); + BUG_ON(i915_gem_obj_bound_any(obj)); /* ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt @@ -3311,7 +3311,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int ret; /* Not valid to be called on unbound objects. 
*/ - if (!i915_gem_obj_ggtt_bound(obj)) + if (!i915_gem_obj_bound_any(obj)) return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) @@ -3735,7 +3735,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { BUG_ON(obj->pin_count == 0); - BUG_ON(!i915_gem_obj_ggtt_bound(obj)); + BUG_ON(!i915_gem_obj_bound_any(obj)); if (--obj->pin_count == 0) obj->pin_mappable = false; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 17be2e4bae6b..aa3fa9425cae 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -466,7 +466,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) { struct drm_i915_gem_exec_object2 *entry; - if (!i915_gem_obj_ggtt_bound(obj)) + if (!i915_gem_obj_bound_any(obj)) return; entry = obj->exec_entry; -- cgit v1.2.3 From 3c6b054de1c0243a0f708abf337f2a0e270ce247 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:13 +0100 Subject: video/hdmi: Replace the payload length by their defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cc: Thierry Reding Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- drivers/video/hdmi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 40178338b619..dbd882ffafbb 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -52,7 +52,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame) frame->type = HDMI_INFOFRAME_TYPE_AVI; frame->version = 2; - frame->length = 13; + frame->length = HDMI_AVI_INFOFRAME_SIZE; return 0; } @@ -151,7 +151,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, frame->type = HDMI_INFOFRAME_TYPE_SPD; frame->version = 1; - frame->length = 25; + frame->length = HDMI_SPD_INFOFRAME_SIZE; strncpy(frame->vendor, vendor, sizeof(frame->vendor)); strncpy(frame->product, product, sizeof(frame->product)); @@ -218,7 +218,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame) frame->type = HDMI_INFOFRAME_TYPE_AUDIO; frame->version = 1; - frame->length = 10; + frame->length = HDMI_AUDIO_INFOFRAME_SIZE; return 0; } -- cgit v1.2.3 From 72b098964d3c3fb030dcac2d4c869c9851a0d17a Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:14 +0100 Subject: video/hdmi: Introduce a generic hdmi_infoframe union MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And a way to pack hdmi_infoframe generically. 
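A sketch of how a driver is expected to use the union (illustrative only; the
buffer size is arbitrary and error handling is trimmed):

	union hdmi_infoframe frame;
	u8 buffer[32];		/* header + AVI payload fits comfortably */
	ssize_t len;

	hdmi_avi_infoframe_init(&frame.avi);
	frame.avi.colorspace = HDMI_COLORSPACE_RGB;

	len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (len < 0)
		return len;	/* unknown frame type or buffer too small */

	/* 'len' bytes of 'buffer' can now be written to the hardware */
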
Cc: Thierry Reding Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- drivers/video/hdmi.c | 43 +++++++++++++++++++++++++++++++++++++++++++ include/linux/hdmi.h | 17 +++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index dbd882ffafbb..f7a85e5b7370 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -22,6 +22,7 @@ */ #include +#include #include #include #include @@ -321,3 +322,45 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, return length; } EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); + +/** + * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer + * @frame: HDMI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t +hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size) +{ + ssize_t length; + + switch (frame->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: + length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_SPD: + length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + length = hdmi_vendor_infoframe_pack(&frame->vendor, + buffer, size); + break; + default: + WARN(1, "Bad infoframe type %d\n", frame->any.type); + length = -EINVAL; + } + + return length; +} +EXPORT_SYMBOL(hdmi_infoframe_pack); diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 3b589440ecfe..0f3f82eadef7 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -23,6 +23,12 @@ enum hdmi_infoframe_type { #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 +struct hdmi_any_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; +}; + enum hdmi_colorspace { HDMI_COLORSPACE_RGB, HDMI_COLORSPACE_YUV422, @@ -228,4 +234,15 @@ struct hdmi_vendor_infoframe { ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); +union hdmi_infoframe { + struct hdmi_any_infoframe any; + struct hdmi_avi_infoframe avi; + struct hdmi_spd_infoframe spd; + struct hdmi_vendor_infoframe vendor; + struct hdmi_audio_infoframe audio; +}; + +ssize_t +hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); + #endif /* _DRM_HDMI_H */ -- cgit v1.2.3 From 61177b0e12ba162d5de206914e8703d8eb90ad19 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:15 +0100 Subject: video/hdmi: Add a macro to return the size of a full infoframe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cc: Thierry Reding Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- include/linux/hdmi.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 0f3f82eadef7..bc6743e76e37 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -23,6 +23,9 @@ enum 
hdmi_infoframe_type { #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 +#define HDMI_INFOFRAME_SIZE(type) \ + (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) + struct hdmi_any_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length;
-- cgit v1.2.3 From 3b390f62674eb70522057e58b1787f2cfa2f57c1 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:16 +0100 Subject: video/hmdi: Clear the whole incoming buffer, not just the infoframe size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the user of this API provides a bigger buffer than the infoframe size, it could be for a good reason. For instance, it could be handing us the buffer that will be written to the hardware, sized up to the maximum infoframe size. Instead of just zeroing up to the infoframe size, let's zero the whole incoming buffer, as those extra bytes are also used to compute the ECC and need to be 0. Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/video/hdmi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index f7a85e5b7370..635d5690dd5a 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -84,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, if (size < length) return -ENOSPC; - memset(buffer, 0, length); + memset(buffer, 0, size); ptr[0] = frame->type; ptr[1] = frame->version; @@ -186,7 +186,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, if (size < length) return -ENOSPC; - memset(buffer, 0, length); + memset(buffer, 0, size); ptr[0] = frame->type; ptr[1] = frame->version; @@ -251,7 +251,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, if (size < length) return -ENOSPC; - memset(buffer, 0, length); + memset(buffer, 0, size); if (frame->channels >= 2) channels = frame->channels - 1; @@ -308,7 +308,7 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, if (size < length) return -ENOSPC; - memset(buffer, 0, length); + memset(buffer, 0, size); ptr[0] = frame->type; ptr[1] = frame->version;
-- cgit v1.2.3 From 03a7a189c22bf911025c695841c18a681498a102 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:17 +0100 Subject: drm: Don't generate invalid AVI infoframes for CEA modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From CEA-861: Data Byte 1, bit A0 indicates whether Active Format Data is present in Data Byte 2 bits R3 through R0. A source device shall set A0=1 when any of the AFD bits are set. I.e., if we want to set active_aspect, we need to set the active_info_valid bit to 1 as well.
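For illustration only, the A0 flag lands in AVI Data Byte 1 roughly as below (a simplified sketch of the CEA-861 byte layout quoted above, not the in-tree packer):

	/* AVI Data Byte 1: Y1:Y0 (bits 6:5), A0 (bit 4), B1:B0 (bits 3:2), S1:S0 (bits 1:0) */
	static unsigned char avi_data_byte1(unsigned char colorspace, int active_info_valid,
					    unsigned char bar_info, unsigned char scan_mode)
	{
		unsigned char db1 = 0;

		db1 |= (colorspace & 0x3) << 5;	/* Y1:Y0 */
		if (active_info_valid)
			db1 |= 1 << 4;		/* A0: AFD present in Data Byte 2 */
		db1 |= (bar_info & 0x3) << 2;	/* B1:B0 */
		db1 |= scan_mode & 0x3;		/* S1:S0 */

		return db1;
	}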
Cc: Thierry Reding Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_edid.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95d6f4b6967c..8d1139f602ec 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3107,6 +3107,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, return 0; frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; + frame->active_info_valid = 1; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; return 0; -- cgit v1.2.3 From 178f736ab96637bc17bcf00c3b58af0137e880e0 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:18 +0100 Subject: drm/i915/hdmi: Change the write_infoframe vfunc to take a buffer and a type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First step in the move to the shared infoframe infrastructure, let's move the different infoframe helpers and the write_infoframe() vfunc to a type (enum hdmi_infoframe_type) and a buffer + len instead of using our struct dip_infoframe. v2: constify the infoframe pointer and don't mix signs (Ville Syrjälä) Signed-off-by: Damien Lespiau Signed-off-by: Paulo Zanoni Signed-off-by: Thierry Reding Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 4 +- drivers/gpu/drm/i915/intel_hdmi.c | 106 ++++++++++++++++++++------------------ 2 files changed, 59 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 54e389de9f42..712e29e27c14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -26,6 +26,7 @@ #define __INTEL_DRV_H__ #include +#include #include #include "i915_drv.h" #include @@ -464,7 +465,8 @@ struct intel_hdmi { enum hdmi_force_audio force_audio; bool rgb_quant_range_selectable; void (*write_infoframe)(struct drm_encoder *encoder, - struct dip_infoframe *frame); + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len); void (*set_infoframes)(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode); }; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 5cad59fd6bd2..951f4817e1b6 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -81,74 +82,75 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame) frame->checksum = 0x100 - sum; } -static u32 g4x_infoframe_index(struct dip_infoframe *frame) +static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_SELECT_AVI; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_SELECT_SPD; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 g4x_infoframe_enable(struct dip_infoframe *frame) +static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + 
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 hsw_infoframe_enable(struct dip_infoframe *frame) +static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI_HSW; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD_HSW; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, +static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, enum transcoder cpu_transcoder) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } static void g4x_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 val = I915_READ(VIDEO_DIP_CTL); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i; WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(VIDEO_DIP_CTL, val); @@ -162,7 +164,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_DATA, 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -171,22 +173,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, } static void ibx_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -200,7 +202,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -209,25 +211,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, } static void cpt_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t 
*)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); /* The DIP control register spec says that we need to update the AVI * infoframe without clearing its enable bit */ - if (frame->type != DIP_TYPE_AVI) - val &= ~g4x_infoframe_enable(frame); + if (type != HDMI_INFOFRAME_TYPE_AVI) + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -241,7 +243,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -250,22 +252,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, } static void vlv_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -279,7 +281,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -288,21 +290,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, } static void hsw_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); - u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder); - unsigned int i, len = DIP_HEADER_SIZE + frame->len; + u32 data_reg; + int i; u32 val = I915_READ(ctl_reg); + data_reg = hsw_infoframe_data_reg(type, + intel_crtc->config.cpu_transcoder); if (data_reg == 0) return; - val &= ~hsw_infoframe_enable(frame); + val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); mmiowb(); @@ -315,7 +320,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, I915_WRITE(data_reg + i, 0); mmiowb(); - val |= hsw_infoframe_enable(frame); + val |= hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); POSTING_READ(ctl_reg); } @@ -326,7 +331,8 @@ static void intel_set_infoframe(struct 
drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); intel_dip_infoframe_csum(frame); - intel_hdmi->write_infoframe(encoder, frame); + intel_hdmi->write_infoframe(encoder, frame->type, (uint8_t *)frame, + DIP_HEADER_SIZE + frame->len); } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, -- cgit v1.2.3 From 5adaea799c1c2c00a1ffe995255af25717029b65 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:19 +0100 Subject: drm/i915/hdmi: Port the infoframe code to the common hdmi helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's use the drivers/video/hmdi.c and drm infoframe helpers to build our infoframes. v2: Simplify the logic to compute the buffer size. We can just take the maximum infoframe size rounded to 32, which happens to be what the hardware let us write anyway. v3: Remove unnecessary memset() (Ville Syrjälä) Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 82 +++++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 951f4817e1b6..84e57c74a8d5 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -325,14 +325,43 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, POSTING_READ(ctl_reg); } +/* + * The data we write to the DIP data buffer registers is 1 byte bigger than the + * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting + * at 0). It's also a byte used by DisplayPort so the same DIP registers can be + * used for both technologies. + * + * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0 + * DW1: DB3 | DB2 | DB1 | DB0 + * DW2: DB7 | DB6 | DB5 | DB4 + * DW3: ... + * + * (HB is Header Byte, DB is Data Byte) + * + * The hdmi pack() functions don't know about that hardware specific hole so we + * trick them by giving an offset into the buffer and moving back the header + * bytes by one. 
+ */ static void intel_set_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + union hdmi_infoframe *frame) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + uint8_t buffer[VIDEO_DIP_DATA_SIZE]; + ssize_t len; - intel_dip_infoframe_csum(frame); - intel_hdmi->write_infoframe(encoder, frame->type, (uint8_t *)frame, - DIP_HEADER_SIZE + frame->len); + /* see comment above for the reason for this offset */ + len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); + if (len < 0) + return; + + /* Insert the 'hole' (see big comment above) at position 3 */ + buffer[0] = buffer[1]; + buffer[1] = buffer[2]; + buffer[2] = buffer[3]; + buffer[3] = 0; + len++; + + intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, @@ -340,40 +369,45 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; + union hdmi_infoframe frame; + int ret; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + adjusted_mode); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return; + } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) - avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; + frame.avi.pixel_repeat = 1; if (intel_hdmi->rgb_quant_range_selectable) { if (intel_crtc->config.limited_color_range) - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_LIMITED; else - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_FULL; } - avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); - - intel_set_infoframe(encoder, &avi_if); + intel_set_infoframe(encoder, &frame); } static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) { - struct dip_infoframe spd_if; + union hdmi_infoframe frame; + int ret; + + ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx"); + if (ret < 0) { + DRM_ERROR("couldn't fill SPD infoframe\n"); + return; + } - memset(&spd_if, 0, sizeof(spd_if)); - spd_if.type = DIP_TYPE_SPD; - spd_if.ver = DIP_VERSION_SPD; - spd_if.len = DIP_LEN_SPD; - strcpy(spd_if.body.spd.vn, "Intel"); - strcpy(spd_if.body.spd.pd, "Integrated gfx"); - spd_if.body.spd.sdi = DIP_SPD_PC; + frame.spd.sdi = HDMI_SPD_SDI_PC; - intel_set_infoframe(encoder, &spd_if); + intel_set_infoframe(encoder, &frame); } static void g4x_set_infoframes(struct drm_encoder *encoder, -- cgit v1.2.3 From 15dcd3502160a46acf11e5cb5e80e9d90a6f9f60 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:20 +0100 Subject: drm/i915/sdvo: Port the infoframe code to the shared infrastructure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Signed-off-by: Paulo Zanoni Signed-off-by: Thierry Reding Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 47423f31f82b..02f220b4e4a1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -963,30 +963,32 @@ static bool 
intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, const struct drm_display_mode *adjusted_mode) { - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc); + uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + struct drm_crtc *crtc = intel_sdvo->base.base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + union hdmi_infoframe frame; + int ret; + ssize_t len; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + adjusted_mode); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return false; + } if (intel_sdvo->rgb_quant_range_selectable) { if (intel_crtc->config.limited_color_range) - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_LIMITED; else - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_FULL; } - avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); - - intel_dip_infoframe_csum(&avi_if); - - /* sdvo spec says that the ecc is handled by the hw, and it looks like - * we must not send the ecc field, either. */ - memcpy(sdvo_data, &avi_if, 3); - sdvo_data[3] = avi_if.checksum; - memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data)); + if (len < 0) + return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, SDVO_HBUF_TX_VSYNC, -- cgit v1.2.3 From c5022bb9ff2a606839ede18ccf268444d3f99729 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:21 +0100 Subject: drm/i915: Remove the now obsolete infoframe definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All the HDMI infoframe code has been ported to use video/hdmi.c, so it's time to say bye bye to this code. 
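For reference, the checksum rule that the shared video/hdmi.c helpers take over is simple: the header, payload and checksum byte must sum to zero modulo 256. A standalone sketch of that rule (not the in-tree function):

	/* Sum every byte of the infoframe (with the checksum slot still 0) and
	 * return the value that makes the total wrap to 0 modulo 256. */
	static unsigned char infoframe_checksum(const unsigned char *ptr, size_t size)
	{
		unsigned char csum = 0;
		size_t i;

		for (i = 0; i < size; i++)
			csum += ptr[i];

		return 256 - csum;
	}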
Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 61 --------------------------------------- drivers/gpu/drm/i915/intel_hdmi.c | 15 ---------- 2 files changed, 76 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 712e29e27c14..7df662bab280 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -395,66 +395,6 @@ struct cxsr_latency { #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) -#define DIP_HEADER_SIZE 5 - -#define DIP_TYPE_AVI 0x82 -#define DIP_VERSION_AVI 0x2 -#define DIP_LEN_AVI 13 -#define DIP_AVI_PR_1 0 -#define DIP_AVI_PR_2 1 -#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) -#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) -#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) - -#define DIP_TYPE_SPD 0x83 -#define DIP_VERSION_SPD 0x1 -#define DIP_LEN_SPD 25 -#define DIP_SPD_UNKNOWN 0 -#define DIP_SPD_DSTB 0x1 -#define DIP_SPD_DVDP 0x2 -#define DIP_SPD_DVHS 0x3 -#define DIP_SPD_HDDVR 0x4 -#define DIP_SPD_DVC 0x5 -#define DIP_SPD_DSC 0x6 -#define DIP_SPD_VCD 0x7 -#define DIP_SPD_GAME 0x8 -#define DIP_SPD_PC 0x9 -#define DIP_SPD_BD 0xa -#define DIP_SPD_SCD 0xb - -struct dip_infoframe { - uint8_t type; /* HB0 */ - uint8_t ver; /* HB1 */ - uint8_t len; /* HB2 - body len, not including checksum */ - uint8_t ecc; /* Header ECC */ - uint8_t checksum; /* PB0 */ - union { - struct { - /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ - uint8_t Y_A_B_S; - /* PB2 - C 7:6, M 5:4, R 3:0 */ - uint8_t C_M_R; - /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ - uint8_t ITC_EC_Q_SC; - /* PB4 - VIC 6:0 */ - uint8_t VIC; - /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */ - uint8_t YQ_CN_PR; - /* PB6 to PB13 */ - uint16_t top_bar_end; - uint16_t bottom_bar_start; - uint16_t left_bar_end; - uint16_t right_bar_start; - } __attribute__ ((packed)) avi; - struct { - uint8_t vn[8]; - uint8_t pd[16]; - uint8_t sdi; - } __attribute__ ((packed)) spd; - uint8_t payload[27]; - } __attribute__ ((packed)) body; -} __attribute__((packed)); - struct intel_hdmi { u32 hdmi_reg; int ddc_bus; @@ -568,7 +508,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); -extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 84e57c74a8d5..b7514886debc 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -67,21 +67,6 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); } -void intel_dip_infoframe_csum(struct dip_infoframe *frame) -{ - uint8_t *data = (uint8_t *)frame; - uint8_t sum = 0; - unsigned i; - - frame->checksum = 0; - frame->ecc = 0; - - for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) - sum += data[i]; - - frame->checksum = 0x100 - sum; -} - static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) { switch (type) { -- cgit v1.2.3 From bf02db99384929a12eff0cf1205b4547e41e881b Mon Sep 17 00:00:00 2001 From: Damien Lespiau 
Date: Tue, 6 Aug 2013 20:32:22 +0100 Subject: drm: Handle the DBLCLK flag in the common infoframe helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_edid.c | 3 +++ drivers/gpu/drm/i915/intel_hdmi.c | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 8d1139f602ec..a9c8980367e0 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3102,6 +3102,9 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, if (err < 0) return err; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + frame->pixel_repeat = 1; + frame->video_code = drm_match_cea_mode(mode); if (!frame->video_code) return 0; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b7514886debc..b54fe29f2eb5 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -364,9 +364,6 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, return; } - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) - frame.avi.pixel_repeat = 1; - if (intel_hdmi->rgb_quant_range_selectable) { if (intel_crtc->config.limited_color_range) frame.avi.quantization_range =
-- cgit v1.2.3 From 15747638e63cdfb94a8ff4af8e4b53e966e2bace Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:23 +0100 Subject: drm: Set aspect ratio fields in the AVI infoframe even for non CEA modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I cannot find any evidence that we shouldn't try to set those fields when setting a non-CEA mode on an HDMI sink. So just kill that return. Suggested-by: Ville Syrjälä Signed-off-by: Damien Lespiau Acked-by: Dave Airlie Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_edid.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a9c8980367e0..dfc7a1ba9360 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3106,8 +3106,6 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, frame->pixel_repeat = 1; frame->video_code = drm_match_cea_mode(mode); - if (!frame->video_code) - return 0; frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; frame->active_info_valid = 1;
-- cgit v1.2.3 From 9198ee5b9048418849ad96fe37eaff8cd56aaf43 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 6 Aug 2013 20:32:24 +0100 Subject: drm/i915/hmdi: Rename set_infoframe() to write_infoframe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit set_infoframe() wraps the write_infoframe() vfunc. Be consistent and name the wrapping function like the vfunc being called. It's doubly confusing as we also have a set_infoframes() vfunc and set_infoframe() doesn't wrap it.
Reviewed-by: Ville Syrjälä Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b54fe29f2eb5..88562913fb7f 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -327,8 +327,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, * trick them by giving an offset into the buffer and moving back the header * bytes by one. */ -static void intel_set_infoframe(struct drm_encoder *encoder, - union hdmi_infoframe *frame) +static void intel_write_infoframe(struct drm_encoder *encoder, + union hdmi_infoframe *frame) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); uint8_t buffer[VIDEO_DIP_DATA_SIZE]; @@ -373,7 +373,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, HDMI_QUANTIZATION_RANGE_FULL; } - intel_set_infoframe(encoder, &frame); + intel_write_infoframe(encoder, &frame); } static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) @@ -389,7 +389,7 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) frame.spd.sdi = HDMI_SPD_SDI_PC; - intel_set_infoframe(encoder, &frame); + intel_write_infoframe(encoder, &frame); } static void g4x_set_infoframes(struct drm_encoder *encoder,
-- cgit v1.2.3 From 39db4a4d7f9b2809141e5bc0e06f7a5b7daeb356 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:00 +0300 Subject: drm/i915: Use 'enabled' instead of 'enable' consistently in sprite WM code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's be consistent and always call our variables 'enabled' instead of the occasional 'enable'. Signed-off-by: Ville Syrjälä [danvet: Spelling fix in the commit message, spotted by Chris.]
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c20d68298072..6761717fe335 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2838,7 +2838,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable, bool scaled) + bool enabled, bool scaled) { struct drm_i915_private *dev_priv = dev->dev_private; int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */ @@ -2846,7 +2846,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, int sprite_wm, reg; int ret; - if (!enable) + if (!enabled) return; switch (pipe) { @@ -2961,13 +2961,13 @@ void intel_update_watermarks(struct drm_device *dev) void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, - bool enable, bool scaled) + bool enabled, bool scaled) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->display.update_sprite_wm) dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, - pixel_size, enable, scaled); + pixel_size, enabled, scaled); } static struct drm_i915_gem_object * -- cgit v1.2.3 From 6f5ddd170453ff44aed1b6efe53ff872295ef538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:02 +0300 Subject: drm/i915: Split watermark level computation from the code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the watermarks computation for one level to a separate function. This function will now set the ->enable flag to true, even if the watermark level wasn't actually checked yet. In the future we will delay the checking so we must consider all unchecked watermarks as possibly valid. 
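As a small worked illustration of the unit handling the new helper preserves (a sketch; it assumes the same 0.5us storage convention noted in the hunk below, which is not otherwise changed):

	/* Hypothetical helper: convert a stored latency field to the 0.1us units
	 * the compute functions expect. WM1+ values are stored in 0.5us units,
	 * so e.g. a raw value of 4 at level 1 means 2.0us, i.e. 20 in 0.1us units. */
	static unsigned int wm_latency_0_1us(unsigned int raw, int level)
	{
		return level > 0 ? raw * 5 : raw;
	}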
v2: Preserve comment about latency units Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 51 +++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6761717fe335..fe1992f2002b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2284,31 +2284,45 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, params->pri_bytes_per_pixel); } +static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, + int level, + struct hsw_pipe_wm_parameters *p, + struct hsw_lp_wm_result *result) +{ + uint16_t pri_latency = dev_priv->wm.pri_latency[level]; + uint16_t spr_latency = dev_priv->wm.spr_latency[level]; + uint16_t cur_latency = dev_priv->wm.cur_latency[level]; + + /* WM1+ latency values stored in 0.5us units */ + if (level > 0) { + pri_latency *= 5; + spr_latency *= 5; + cur_latency *= 5; + } + + result->pri_val = ilk_compute_pri_wm(p, pri_latency, level); + result->spr_val = ilk_compute_spr_wm(p, spr_latency); + result->cur_val = ilk_compute_cur_wm(p, cur_latency); + result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val); + result->enable = true; +} + static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, int level, struct hsw_wm_maximums *max, struct hsw_pipe_wm_parameters *params, struct hsw_lp_wm_result *result) { enum pipe pipe; - uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3]; - - for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { - struct hsw_pipe_wm_parameters *p = ¶ms[pipe]; - /* WM1+ latency values stored in 0.5us units */ - uint16_t pri_latency = dev_priv->wm.pri_latency[level] * 5; - uint16_t spr_latency = dev_priv->wm.spr_latency[level] * 5; - uint16_t cur_latency = dev_priv->wm.cur_latency[level] * 5; + struct hsw_lp_wm_result res[3]; - pri_val[pipe] = ilk_compute_pri_wm(p, pri_latency, true); - spr_val[pipe] = ilk_compute_spr_wm(p, spr_latency); - cur_val[pipe] = ilk_compute_cur_wm(p, cur_latency); - fbc_val[pipe] = ilk_compute_fbc_wm(p, pri_val[pipe]); - } + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) + ilk_compute_wm_level(dev_priv, level, ¶ms[pipe], &res[pipe]); - result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]); - result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]); - result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]); - result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]); + result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val); + result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val); + result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val); + result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val); + result->enable = true; if (result->fbc_val > max->fbc) { result->fbc_enable = false; @@ -2317,6 +2331,9 @@ static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, result->fbc_enable = true; } + if (!result->enable) + return false; + result->enable = result->pri_val <= max->pri && result->spr_val <= max->spr && result->cur_val <= max->cur; -- cgit v1.2.3 From 71fff20ff1bb790f4defe0c880e028581ffab420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:03 +0300 Subject: drm/i915: Kill fbc_enable from hsw_lp_wm_results MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to store the FBC WM enabled status in each watermark 
level. We have to reduce it down to a single boolean anyway, so just delay checking the FBC WM limit until we're computing the final value. Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fe1992f2002b..d5f0b4e1f1c4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2182,7 +2182,6 @@ struct hsw_wm_maximums { struct hsw_lp_wm_result { bool enable; - bool fbc_enable; uint32_t pri_val; uint32_t spr_val; uint32_t cur_val; @@ -2324,13 +2323,6 @@ static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val); result->enable = true; - if (result->fbc_val > max->fbc) { - result->fbc_enable = false; - result->fbc_val = 0; - } else { - result->fbc_enable = true; - } - if (!result->enable) return false; @@ -2575,9 +2567,9 @@ static void hsw_compute_wm_results(struct drm_device *dev, * a WM level. */ results->enable_fbc_wm = true; for (level = 1; level <= max_level; level++) { - if (!lp_results[level - 1].fbc_enable) { + if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { results->enable_fbc_wm = false; - break; + lp_results[level - 1].fbc_val = 0; }
-- cgit v1.2.3 From 77c122bcc448439af7f5fcb2542406b45b606c51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:04 +0300 Subject: drm/i915: Rename hsw_data_buf_partitioning to intel_ddb_partitioning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're going to use the 1/2 vs. 5/6 split option already on IVB so the HSW name is not proper. Just give it an intel_ prefix and move it to i915_drv.h so that we can use it there later. Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 +++++ drivers/gpu/drm/i915/intel_pm.c | 17 ++++++----------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2421ad17831b..cb4521d95429 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1055,6 +1055,11 @@ struct intel_vbt_data { struct child_device_config *child_dev; }; +enum intel_ddb_partitioning { + INTEL_DDB_PART_1_2, + INTEL_DDB_PART_5_6, /* IVB+ */ +}; + typedef struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d5f0b4e1f1c4..81d88d040128 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2196,11 +2196,6 @@ struct hsw_wm_values { bool enable_fbc_wm; }; -enum hsw_data_buf_partitioning { - HSW_DATA_BUF_PART_1_2, - HSW_DATA_BUF_PART_5_6, -}; - /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units.
@@ -2631,11 +2626,11 @@ static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, */ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, struct hsw_wm_values *results, - enum hsw_data_buf_partitioning partitioning) + enum intel_ddb_partitioning partitioning) { struct hsw_wm_values previous; uint32_t val; - enum hsw_data_buf_partitioning prev_partitioning; + enum intel_ddb_partitioning prev_partitioning; bool prev_enable_fbc_wm; previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK); @@ -2652,7 +2647,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C)); prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? - HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2; + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); @@ -2691,7 +2686,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, if (prev_partitioning != partitioning) { val = I915_READ(WM_MISC); - if (partitioning == HSW_DATA_BUF_PART_1_2) + if (partitioning == INTEL_DDB_PART_1_2) val &= ~WM_MISC_DATA_PARTITION_5_6; else val |= WM_MISC_DATA_PARTITION_5_6; @@ -2728,7 +2723,7 @@ static void haswell_update_wm(struct drm_device *dev) struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; struct hsw_pipe_wm_parameters params[3]; struct hsw_wm_values results_1_2, results_5_6, *best_results; - enum hsw_data_buf_partitioning partitioning; + enum intel_ddb_partitioning partitioning; hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); @@ -2743,7 +2738,7 @@ static void haswell_update_wm(struct drm_device *dev) } partitioning = (best_results == &results_1_2) ? - HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6; + INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; hsw_write_wm_values(dev_priv, best_results, partitioning); } -- cgit v1.2.3 From 2b4bd0e0658b98341a899d9550169ffa26c32e39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 15:11:52 +0300 Subject: drm/i915: Silence a sparse warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/i915/i915_debugfs.c:2136:3: warning: symbol 'i915_debugfs_files' was not declared. Should it be static? Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d2935b4fd695..5d52a23d5662 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2130,7 +2130,7 @@ static struct drm_info_list i915_debugfs_list[] = { }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) -struct i915_debugfs_files { +static struct i915_debugfs_files { const char *name; const struct file_operations *fops; } i915_debugfs_files[] = { -- cgit v1.2.3 From 5cacaac77cfc1130a2d8bf60addb5c6c9c878214 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:13 -0700 Subject: drm/i915: Fix up map and fenceable for VMA formerly: "drm/i915: Create VMAs (part 3.5) - map and fenceable tracking" The map_and_fenceable tracking is per object. GTT mapping, and fences only apply to global GTT. As such, object operations which are not performed on the global GTT should not effect mappable or fenceable characteristics. 
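In code terms, the rule being enforced is roughly the following (a sketch reusing the names from the hunks below; the actual change open-codes the check at each call site):

	/* Only a binding in the global GTT may influence the per-object flag;
	 * bindings in other address spaces leave it untouched. */
	static void update_map_and_fenceable(struct i915_vma *vma, bool value)
	{
		if (i915_is_ggtt(vma->vm))
			vma->obj->map_and_fenceable = value;
	}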
Functionally, this commit could very well be squashed in to a previous patch which updated object operations to take a VM argument. This commit is split out because it's a bit tricky (or at least it was for me). Signed-off-by: Ben Widawsky [danvet: Drop the bogus hunk in i915_vma_unbind as discussed with Ben.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a51731e9233b..5eacc497f179 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2642,7 +2642,8 @@ int i915_vma_unbind(struct i915_vma *vma) list_del(&obj->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ - obj->map_and_fenceable = true; + if (i915_is_ggtt(vma->vm)) + obj->map_and_fenceable = true; list_del(&vma->vma_link); drm_mm_remove_node(&vma->node); @@ -3203,7 +3204,9 @@ search_free: i915_is_ggtt(vm) && vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end; - obj->map_and_fenceable = mappable && fenceable; + /* Map and fenceable only changes if the VM is the global GGTT */ + if (i915_is_ggtt(vm)) + obj->map_and_fenceable = mappable && fenceable; trace_i915_vma_bind(vma, map_and_fenceable); i915_gem_verify_gtt(dev); -- cgit v1.2.3 From ca191b1313e733e47a9fb37c26b44aa6cdd9b1b1 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:14 -0700 Subject: drm/i915: mm_list is per VMA formerly: "drm/i915: Create VMAs (part 5) - move mm_list" The mm_list is used for the active/inactive LRUs. Since those LRUs are per address space, the link should be per VMx . Because we'll only ever have 1 VMA before this point, it's not incorrect to defer this change until this point in the patch series, and doing it here makes the change much easier to understand. Shamelessly manipulated out of Daniel: "active/inactive stuff is used by eviction when we run out of address space, so needs to be per-vma and per-address space. Bound/unbound otoh is used by the shrinker which only cares about the amount of memory used and not one bit about in which address space this memory is all used in. Of course to actual kick out an object we need to unbind it from every address space, but for that we have the per-object list of vmas." v2: only bump GGTT LRU in i915_gem_object_set_to_gtt_domain (Chris) v3: Moved earlier in the series v4: Add dropped message from v3 Signed-off-by: Ben Widawsky [danvet: Frob patch to apply and use vma->node.size directly as discused with Ben. Also drop a needles BUG_ON before move_to_inactive, the function itself has the same check.] [danvet 2nd: Rebase on top of the lost "drm/i915: Cleanup more of VMA in destroy", specifically unlink the vma from the mm_list in vma_unbind (to keep it symmetric with bind_to_vm) instead of vma_destroy.] 
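The resulting split of the bookkeeping, sketched with simplified structures (field names as in the patch below; everything else omitted):

	struct i915_vma_sketch {
		/* LRU link: each address space has its own active/inactive lists,
		 * walked by eviction, so the link lives on the VMA. */
		struct list_head mm_list;
	};

	struct i915_gem_object_sketch {
		/* bound/unbound link: the shrinker only cares how much memory an
		 * object pins, not where it is mapped, so this stays per object. */
		struct list_head global_list;
	};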
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 54 +++++++++++++++++++----------- drivers/gpu/drm/i915/i915_drv.h | 5 +-- drivers/gpu/drm/i915/i915_gem.c | 28 +++++++++------- drivers/gpu/drm/i915/i915_gem_context.c | 3 ++ drivers/gpu/drm/i915/i915_gem_evict.c | 14 ++++---- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++ drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 37 +++++++++++--------- 8 files changed, 86 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5d52a23d5662..a1f4c91fb112 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -149,7 +149,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct i915_address_space *vm = &dev_priv->gtt.base; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; size_t total_obj_size, total_gtt_size; int count, ret; @@ -157,6 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) if (ret) return ret; + /* FIXME: the user of this interface might want more than just GGTT */ switch (list) { case ACTIVE_LIST: seq_puts(m, "Active:\n"); @@ -172,12 +173,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj, head, mm_list) { - seq_puts(m, " "); - describe_obj(m, obj); - seq_putc(m, '\n'); - total_obj_size += obj->base.size; - total_gtt_size += i915_gem_obj_ggtt_size(obj); + list_for_each_entry(vma, head, mm_list) { + seq_printf(m, " "); + describe_obj(m, vma->obj); + seq_printf(m, "\n"); + total_obj_size += vma->obj->base.size; + total_gtt_size += vma->node.size; count++; } mutex_unlock(&dev->struct_mutex); @@ -224,7 +225,18 @@ static int per_file_stats(int id, void *ptr, void *data) return 0; } -static int i915_gem_object_info(struct seq_file *m, void *data) +#define count_vmas(list, member) do { \ + list_for_each_entry(vma, list, member) { \ + size += i915_gem_obj_ggtt_size(vma->obj); \ + ++count; \ + if (vma->obj->map_and_fenceable) { \ + mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ + ++mappable_count; \ + } \ + } \ +} while (0) + +static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; @@ -234,6 +246,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct drm_i915_gem_object *obj; struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_file *file; + struct i915_vma *vma; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -250,12 +263,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&vm->active_list, mm_list); + count_vmas(&vm->active_list, mm_list); seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&vm->inactive_list, mm_list); + count_vmas(&vm->inactive_list, mm_list); seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); @@ -1774,7 +1787,8 @@ i915_drop_caches_set(void *data, u64 val) struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; 
struct drm_i915_gem_object *obj, *next; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_address_space *vm; + struct i915_vma *vma, *x; int ret; DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); @@ -1795,14 +1809,16 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_retire_requests(dev); if (val & DROP_BOUND) { - list_for_each_entry_safe(obj, next, &vm->inactive_list, - mm_list) { - if (obj->pin_count) - continue; - - ret = i915_gem_object_ggtt_unbind(obj); - if (ret) - goto unlock; + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry_safe(vma, x, &vm->inactive_list, + mm_list) { + if (vma->obj->pin_count) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + goto unlock; + } } } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cb4521d95429..20becc5500bd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -558,6 +558,9 @@ struct i915_vma { struct drm_i915_gem_object *obj; struct i915_address_space *vm; + /** This object's place on the active/inactive lists */ + struct list_head mm_list; + struct list_head vma_link; /* Link in the object's VMA list */ }; @@ -1299,9 +1302,7 @@ struct drm_i915_gem_object { struct drm_mm_node *stolen; struct list_head global_list; - /** This object's place on the active/inactive lists */ struct list_head ring_list; - struct list_head mm_list; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5eacc497f179..985a13035550 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1886,7 +1886,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; u32 seqno = intel_ring_get_seqno(ring); BUG_ON(ring == NULL); @@ -1902,8 +1901,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, obj->active = 1; } - /* Move from whatever list we were on to the tail of execution. */ - list_move_tail(&obj->mm_list, &vm->active_list); list_move_tail(&obj->ring_list, &ring->active_list); obj->last_read_seqno = seqno; @@ -1925,14 +1922,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, static void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; + struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - list_move_tail(&obj->mm_list, &vm->inactive_list); + list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2640,7 +2637,7 @@ int i915_vma_unbind(struct i915_vma *vma) i915_gem_gtt_finish_object(obj); i915_gem_object_unpin_pages(obj); - list_del(&obj->mm_list); + list_del(&vma->mm_list); /* Avoid an unnecessary call to unbind on rebind. 
*/ if (i915_is_ggtt(vma->vm)) obj->map_and_fenceable = true; @@ -3187,7 +3184,7 @@ search_free: goto err_remove_node; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &vm->inactive_list); + list_add_tail(&vma->mm_list, &vm->inactive_list); /* Keep GGTT vmas first to make debug easier */ if (i915_is_ggtt(vm)) @@ -3352,9 +3349,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) old_write_domain); /* And bump the LRU for this access */ - if (i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, - &dev_priv->gtt.base.inactive_list); + if (i915_gem_object_is_inactive(obj)) { + struct i915_vma *vma = i915_gem_obj_to_vma(obj, + &dev_priv->gtt.base); + if (vma) + list_move_tail(&vma->mm_list, + &dev_priv->gtt.base.inactive_list); + + } return 0; } @@ -3927,7 +3929,6 @@ unlock: void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops) { - INIT_LIST_HEAD(&obj->mm_list); INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); @@ -4069,6 +4070,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&vma->vma_link); + INIT_LIST_HEAD(&vma->mm_list); vma->vm = vm; vma->obj = obj; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 7273a729a039..403309c2a7d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -436,7 +436,10 @@ static int do_switch(struct i915_hw_context *to) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { + struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private; + struct i915_address_space *ggtt = &dev_priv->gtt.base; from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); i915_gem_object_move_to_active(from->obj, ring); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 61bf5e20e5e0..425939b7d343 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -87,8 +87,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj, &vm->inactive_list, mm_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); + list_for_each_entry(vma, &vm->inactive_list, mm_list) { if (mark_free(vma, &unwind_list)) goto found; } @@ -97,8 +96,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, goto none; /* Now merge in the soon-to-be-expired objects... 
*/ - list_for_each_entry(obj, &vm->active_list, mm_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); + list_for_each_entry(vma, &vm->active_list, mm_list) { if (mark_free(vma, &unwind_list)) goto found; } @@ -159,7 +157,7 @@ i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct i915_address_space *vm; - struct drm_i915_gem_object *obj, *next; + struct i915_vma *vma, *next; bool lists_empty = true; int ret; @@ -187,9 +185,9 @@ i915_gem_evict_everything(struct drm_device *dev) /* Having flushed everything, unbind() should never raise an error */ list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) - if (obj->pin_count == 0) - WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm))); + list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) + if (vma->obj->pin_count == 0) + WARN_ON(i915_vma_unbind(vma)); } return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index aa3fa9425cae..8ccc29ac9629 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -801,6 +801,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->base.read_domains = obj->base.pending_read_domains; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + /* FIXME: This lookup gets fixed later <-- danvet */ + list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); i915_gem_object_move_to_active(obj, ring); if (obj->base.write_domain) { obj->dirty = 1; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 934840860c6d..e68c4b5da46d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -401,7 +401,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, obj->has_global_gtt_mapping = 1; list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &ggtt->inactive_list); + list_add_tail(&vma->mm_list, &ggtt->inactive_list); return obj; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8091485e7e88..fad48b2bb870 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -556,11 +556,11 @@ static void capture_bo(struct drm_i915_error_buffer *err, static u32 capture_active_bo(struct drm_i915_error_buffer *err, int count, struct list_head *head) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; int i = 0; - list_for_each_entry(obj, head, mm_list) { - capture_bo(err++, obj); + list_for_each_entry(vma, head, mm_list) { + capture_bo(err++, vma->obj); if (++i == count) break; } @@ -622,7 +622,8 @@ static struct drm_i915_error_object * i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { - struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_address_space *vm; + struct i915_vma *vma; struct drm_i915_gem_object *obj; u32 seqno; @@ -642,20 +643,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, } seqno = ring->get_seqno(ring, false); - list_for_each_entry(obj, &vm->active_list, mm_list) { - if (obj->ring != ring) - continue; + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry(vma, &vm->active_list, mm_list) { + obj = vma->obj; + if (obj->ring != ring) + continue; - if (i915_seqno_passed(seqno, obj->last_read_seqno)) - 
continue; + if (i915_seqno_passed(seqno, obj->last_read_seqno)) + continue; - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) - continue; + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) + continue; - /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userspace. - */ - return i915_error_object_create(dev_priv, obj); + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userspace. + */ + return i915_error_object_create(dev_priv, obj); + } } return NULL; @@ -775,11 +779,12 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_vma *vma; struct drm_i915_gem_object *obj; int i; i = 0; - list_for_each_entry(obj, &vm->active_list, mm_list) + list_for_each_entry(vma, &vm->active_list, mm_list) i++; error->active_bo_count = i; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) -- cgit v1.2.3 From 95f5301dd880da2dea2c9a9c29750064536d426a Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:15 -0700 Subject: drm/i915: Update error capture for VMs formerly: "drm/i915: Create VMAs (part 4) - Error capture" Since the active/inactive lists are per VM, we need to modify the error capture code to be aware of this, and also extend it to capture the buffers from all the VMs. For now all the code assumes only 1 VM, but it will become more generic over the next few patches. NOTE: If the number of VMs in a real world system grows significantly we'll have to focus on only capturing the guilty VM, or else it's likely there won't be enough space for error capture. v2: Squashed in the "part 6" which had dependencies on the mm_list change. Since I've moved the mm_list change to an earlier point in the series, we were able to accomplish it here and now. 
v3: Rebased over new error capture Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_gpu_error.c | 76 ++++++++++++++++++++++++----------- 2 files changed, 55 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 20becc5500bd..d0cdec81aac3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -323,8 +323,8 @@ struct drm_i915_error_state { u32 purgeable:1; s32 ring:4; u32 cache_level:2; - } *active_bo, *pinned_bo; - u32 active_bo_count, pinned_bo_count; + } **active_bo, **pinned_bo; + u32 *active_bo_count, *pinned_bo_count; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; }; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fad48b2bb870..60393cb9a7c7 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -304,13 +304,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (error->active_bo) print_error_buffers(m, "Active", - error->active_bo, - error->active_bo_count); + error->active_bo[0], + error->active_bo_count[0]); if (error->pinned_bo) print_error_buffers(m, "Pinned", - error->pinned_bo, - error->pinned_bo_count); + error->pinned_bo[0], + error->pinned_bo_count[0]); for (i = 0; i < ARRAY_SIZE(error->ring); i++) { struct drm_i915_error_object *obj; @@ -775,42 +775,72 @@ static void i915_gem_record_rings(struct drm_device *dev, } } -static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error) +/* FIXME: Since pin count/bound list is global, we duplicate what we capture per + * VM. + */ +static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error, + struct i915_address_space *vm, + const int ndx) { - struct i915_address_space *vm = &dev_priv->gtt.base; - struct i915_vma *vma; + struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL; struct drm_i915_gem_object *obj; + struct i915_vma *vma; int i; i = 0; list_for_each_entry(vma, &vm->active_list, mm_list) i++; - error->active_bo_count = i; + error->active_bo_count[ndx] = i; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) i++; - error->pinned_bo_count = i - error->active_bo_count; + error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; if (i) { - error->active_bo = kmalloc(sizeof(*error->active_bo)*i, - GFP_ATOMIC); - if (error->active_bo) - error->pinned_bo = - error->active_bo + error->active_bo_count; + active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); + if (active_bo) + pinned_bo = active_bo + error->active_bo_count[ndx]; } - if (error->active_bo) - error->active_bo_count = - capture_active_bo(error->active_bo, - error->active_bo_count, + if (active_bo) + error->active_bo_count[ndx] = + capture_active_bo(active_bo, + error->active_bo_count[ndx], &vm->active_list); - if (error->pinned_bo) - error->pinned_bo_count = - capture_pinned_bo(error->pinned_bo, - error->pinned_bo_count, + if (pinned_bo) + error->pinned_bo_count[ndx] = + capture_pinned_bo(pinned_bo, + error->pinned_bo_count[ndx], &dev_priv->mm.bound_list); + error->active_bo[ndx] = active_bo; + error->pinned_bo[ndx] = pinned_bo; +} + +static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct i915_address_space *vm; + int cnt = 0, i = 0; + + 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) + cnt++; + + if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) + cnt = 1; + + vm = &dev_priv->gtt.base; + + error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); + error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); + error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), + GFP_ATOMIC); + error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), + GFP_ATOMIC); + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + i915_gem_capture_vm(dev_priv, error, vm, i++); } /** -- cgit v1.2.3 From 8b9c2b9411dd55617442f8151fb6fb2849c72f7e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 31 Jul 2013 17:00:16 -0700 Subject: drm/i915: Add vma to list at creation With the current code there shouldn't be a distinction - however with an upcoming change we intend to allocate a vma much earlier, before it's actually bound anywhere. To do this we have to check node allocation as well for the _bound() check. Signed-off-by: Ben Widawsky [danvet: move list_del(&vma->vma_link) from vma_unbind to vma_destroy, again fallout from the loss of "rm/i915: Cleanup more of VMA in destroy".] Signed-off-by: Daniel Vetter fixup for drm/i915: Add vma to list at creation --- drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 985a13035550..f5d389a20024 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2642,7 +2642,6 @@ int i915_vma_unbind(struct i915_vma *vma) if (i915_is_ggtt(vma->vm)) obj->map_and_fenceable = true; - list_del(&vma->vma_link); drm_mm_remove_node(&vma->node); i915_gem_vma_destroy(vma); @@ -3186,12 +3185,6 @@ search_free: list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&vma->mm_list, &vm->inactive_list); - /* Keep GGTT vmas first to make debug easier */ - if (i915_is_ggtt(vm)) - list_add(&vma->vma_link, &obj->vma_list); - else - list_add_tail(&vma->vma_link, &obj->vma_list); - fenceable = i915_is_ggtt(vm) && i915_gem_obj_ggtt_size(obj) == fence_size && @@ -4074,12 +4067,19 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, vma->vm = vm; vma->obj = obj; + /* Keep GGTT vmas first to make debug easier */ + if (i915_is_ggtt(vm)) + list_add(&vma->vma_link, &obj->vma_list); + else + list_add_tail(&vma->vma_link, &obj->vma_list); + return vma; } void i915_gem_vma_destroy(struct i915_vma *vma) { WARN_ON(vma->node.allocated); + list_del(&vma->vma_link); kfree(vma); } @@ -4767,7 +4767,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, vma_link) - if (vma->vm == vm) + if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) return true; return false; -- cgit v1.2.3 From a9786a119d2cd0f43d5554bddda71a5fd6ee39ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 13:24:47 +0300 Subject: drm/i915: Pull watermark level validity check out MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the code a bit to split the watermark level validity check into a separate function. Also add hack there that allows us to use it even for LP0 watermarks. ATM we don't pre-compute/check the LP0 watermarks, so we just have to clamp them to the maximum and hope things work out. 
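One subtlety worth spelling out, since it is easy to miss in the hunk below: ilk_check_wm() returns whether the level fit *before* the LP0 clamping, while the clamped LP0 result is still left enabled. A minimal standalone model of that behaviour (an editor's sketch reduced to a single primary value, not the kernel code itself):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct wm_level { bool enable; uint32_t pri_val; };

/* Mirrors the control flow of ilk_check_wm(), primary watermark only. */
static bool check_wm_level(int level, uint32_t max_pri, struct wm_level *r)
{
	bool ret;

	if (!r->enable)			/* already known to be invalid? */
		return false;

	r->enable = r->pri_val <= max_pri;
	ret = r->enable;

	if (level == 0 && !r->enable) {	/* the LP0 hack: clamp and carry on */
		r->pri_val = max_pri;
		r->enable = true;
	}

	return ret;			/* the pre-clamp verdict */
}

int main(void)
{
	struct wm_level lp0 = { .enable = true, .pri_val = 200 };

	assert(!check_wm_level(0, 127, &lp0));		/* the overflow is reported... */
	assert(lp0.enable && lp0.pri_val == 127);	/* ...but LP0 stays enabled, clamped */
	return 0;
}

For LP1+ levels there is no such rescue: an oversized level is simply disabled.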
v2: Add some debug prints when we exceed max WM0 Kill pointless ret = false' assignment. Include the check for the already disabled 'result' which got shuffled around when the patchs got reorderd Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 51 +++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 81d88d040128..b107d25282e3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2278,6 +2278,49 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, params->pri_bytes_per_pixel); } +static bool ilk_check_wm(int level, + const struct hsw_wm_maximums *max, + struct hsw_lp_wm_result *result) +{ + bool ret; + + /* already determined to be invalid? */ + if (!result->enable) + return false; + + result->enable = result->pri_val <= max->pri && + result->spr_val <= max->spr && + result->cur_val <= max->cur; + + ret = result->enable; + + /* + * HACK until we can pre-compute everything, + * and thus fail gracefully if LP0 watermarks + * are exceeded... + */ + if (level == 0 && !result->enable) { + if (result->pri_val > max->pri) + DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", + level, result->pri_val, max->pri); + if (result->spr_val > max->spr) + DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", + level, result->spr_val, max->spr); + if (result->cur_val > max->cur) + DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", + level, result->cur_val, max->cur); + + result->pri_val = min_t(uint32_t, result->pri_val, max->pri); + result->spr_val = min_t(uint32_t, result->spr_val, max->spr); + result->cur_val = min_t(uint32_t, result->cur_val, max->cur); + result->enable = true; + } + + DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis"); + + return ret; +} + static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, int level, struct hsw_pipe_wm_parameters *p, @@ -2318,13 +2361,7 @@ static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val); result->enable = true; - if (!result->enable) - return false; - - result->enable = result->pri_val <= max->pri && - result->spr_val <= max->spr && - result->cur_val <= max->cur; - return result->enable; + return ilk_check_wm(level, max, result); } static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 1fd527cc34ed44efa4f59c01ad920479f728b707 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:05 +0300 Subject: drm/i915: Rename hsw_lp_wm_result to intel_wm_level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's call hsw_lp_wm_result intel_wm_level from now on and move it to i915_drv.h for later use. 
Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++++++ drivers/gpu/drm/i915/intel_pm.c | 20 ++++++-------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d0cdec81aac3..2cfa21fbedce 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1063,6 +1063,14 @@ enum intel_ddb_partitioning { INTEL_DDB_PART_5_6, /* IVB+ */ }; +struct intel_wm_level { + bool enable; + uint32_t pri_val; + uint32_t spr_val; + uint32_t cur_val; + uint32_t fbc_val; +}; + typedef struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b107d25282e3..d7bb61efca2d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2180,14 +2180,6 @@ struct hsw_wm_maximums { uint16_t fbc; }; -struct hsw_lp_wm_result { - bool enable; - uint32_t pri_val; - uint32_t spr_val; - uint32_t cur_val; - uint32_t fbc_val; -}; - struct hsw_wm_values { uint32_t wm_pipe[3]; uint32_t wm_lp[3]; @@ -2280,7 +2272,7 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, static bool ilk_check_wm(int level, const struct hsw_wm_maximums *max, - struct hsw_lp_wm_result *result) + struct intel_wm_level *result) { bool ret; @@ -2324,7 +2316,7 @@ static bool ilk_check_wm(int level, static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, int level, struct hsw_pipe_wm_parameters *p, - struct hsw_lp_wm_result *result) + struct intel_wm_level *result) { uint16_t pri_latency = dev_priv->wm.pri_latency[level]; uint16_t spr_latency = dev_priv->wm.spr_latency[level]; @@ -2347,10 +2339,10 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, int level, struct hsw_wm_maximums *max, struct hsw_pipe_wm_parameters *params, - struct hsw_lp_wm_result *result) + struct intel_wm_level *result) { enum pipe pipe; - struct hsw_lp_wm_result res[3]; + struct intel_wm_level res[3]; for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) ilk_compute_wm_level(dev_priv, level, ¶ms[pipe], &res[pipe]); @@ -2584,7 +2576,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct hsw_lp_wm_result lp_results[4] = {}; + struct intel_wm_level lp_results[4] = {}; enum pipe pipe; int level, max_level, wm_lp; @@ -2607,7 +2599,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, memset(results, 0, sizeof(*results)); for (wm_lp = 1; wm_lp <= 3; wm_lp++) { - const struct hsw_lp_wm_result *r; + const struct intel_wm_level *r; level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; if (level > max_level) -- cgit v1.2.3 From 158ae64f820939473012dacfc0ae1ec782b45b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 13:28:19 +0300 Subject: drm/i915: Calculate max watermark levels for ILK+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are quite a few variables we need to take into account to determine the maximum watermark levels, so it feels a bit cleaner to calculate those rather than just have a bunch of what look like magic numbers. 
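A quick way to convince yourself the refactor is equivalent: the new helpers reproduce the constants they replace. The standalone check below is an editor's re-derivation for Haswell only (gen 7, three pipes, 768-entry display FIFO, LP1+ levels), not the kernel function itself; it applies the same arithmetic as ilk_plane_wm_max() and lands exactly on the old 768/384/128/640/256 values that this patch removes from hsw_compute_wm_parameters():

#include <assert.h>
#include <stdbool.h>

/* Simplified primary/sprite rule for gen 7, LP1+ (level > 0) only. */
static unsigned int hsw_lp_plane_max(unsigned int pipes_active,
				     bool sprites_enabled, bool part_5_6,
				     bool is_sprite)
{
	unsigned int fifo = 768;		/* gen7+ display FIFO entries */

	if (is_sprite && !sprites_enabled)
		return 0;
	if (pipes_active > 1)
		fifo /= 3;			/* shared between all three pipes */
	if (sprites_enabled) {
		if (part_5_6) {			/* 5/6 : 1/6 DDB split */
			if (is_sprite)
				fifo *= 5;
			fifo /= 6;
		} else {			/* 1:1 split */
			fifo /= 2;
		}
	}
	return fifo < 1023 ? fifo : 1023;	/* gen7 LP1+ register limit */
}

int main(void)
{
	/* The old hard-coded maximums, single pipe... */
	assert(hsw_lp_plane_max(1, false, false, false) == 768);
	assert(hsw_lp_plane_max(1, true,  false, false) == 384);
	assert(hsw_lp_plane_max(1, true,  true,  false) == 128);
	assert(hsw_lp_plane_max(1, true,  true,  true)  == 640);
	/* ...and with more than one pipe active. */
	assert(hsw_lp_plane_max(3, false, false, false) == 256);
	assert(hsw_lp_plane_max(3, true,  false, false) == 128);
	return 0;
}

The cursor and FBC maximums fall out the same way (64 with multiple pipes, 255 otherwise, and 15).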
v2: s/pipes_active/num_pipes_active s/othwewise/otherwise Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 119 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 107 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d7bb61efca2d..a4c3c5497941 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2270,6 +2270,104 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, params->pri_bytes_per_pixel); } +static unsigned int ilk_display_fifo_size(const struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen >= 7) + return 768; + else + return 512; +} + +/* Calculate the maximum primary/sprite plane watermark */ +static unsigned int ilk_plane_wm_max(const struct drm_device *dev, + int level, + unsigned int num_pipes_active, + bool sprite_enabled, + enum intel_ddb_partitioning ddb_partitioning, + bool is_sprite) +{ + unsigned int fifo_size = ilk_display_fifo_size(dev); + unsigned int max; + + /* if sprites aren't enabled, sprites get nothing */ + if (is_sprite && !sprite_enabled) + return 0; + + /* HSW allows LP1+ watermarks even with multiple pipes */ + if (level == 0 || num_pipes_active > 1) { + fifo_size /= INTEL_INFO(dev)->num_pipes; + + /* + * For some reason the non self refresh + * FIFO size is only half of the self + * refresh FIFO size on ILK/SNB. + */ + if (INTEL_INFO(dev)->gen <= 6) + fifo_size /= 2; + } + + if (sprite_enabled) { + /* level 0 is always calculated with 1:1 split */ + if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { + if (is_sprite) + fifo_size *= 5; + fifo_size /= 6; + } else { + fifo_size /= 2; + } + } + + /* clamp to max that the registers can hold */ + if (INTEL_INFO(dev)->gen >= 7) + /* IVB/HSW primary/sprite plane watermarks */ + max = level == 0 ? 127 : 1023; + else if (!is_sprite) + /* ILK/SNB primary plane watermarks */ + max = level == 0 ? 127 : 511; + else + /* ILK/SNB sprite plane watermarks */ + max = level == 0 ? 63 : 255; + + return min(fifo_size, max); +} + +/* Calculate the maximum cursor plane watermark */ +static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, + int level, unsigned int num_pipes_active) +{ + /* HSW LP1+ watermarks w/ multiple pipes */ + if (level > 0 && num_pipes_active > 1) + return 64; + + /* otherwise just report max that registers can hold */ + if (INTEL_INFO(dev)->gen >= 7) + return level == 0 ? 63 : 255; + else + return level == 0 ? 31 : 63; +} + +/* Calculate the maximum FBC watermark */ +static unsigned int ilk_fbc_wm_max(void) +{ + /* max that registers can hold */ + return 15; +} + +static void ilk_wm_max(struct drm_device *dev, + int level, + unsigned int num_pipes_active, + bool sprite_enabled, + enum intel_ddb_partitioning ddb_partitioning, + struct hsw_wm_maximums *max) +{ + max->pri = ilk_plane_wm_max(dev, level, num_pipes_active, + sprite_enabled, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(dev, level, num_pipes_active, + sprite_enabled, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(dev, level, num_pipes_active); + max->fbc = ilk_fbc_wm_max(); +} + static bool ilk_check_wm(int level, const struct hsw_wm_maximums *max, struct intel_wm_level *result) @@ -2555,18 +2653,15 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, sprites_enabled++; } - if (pipes_active > 1) { - lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 
128 : 256; - lp_max_1_2->spr = lp_max_5_6->spr = 128; - lp_max_1_2->cur = lp_max_5_6->cur = 64; - } else { - lp_max_1_2->pri = sprites_enabled ? 384 : 768; - lp_max_5_6->pri = sprites_enabled ? 128 : 768; - lp_max_1_2->spr = 384; - lp_max_5_6->spr = 640; - lp_max_1_2->cur = lp_max_5_6->cur = 255; - } - lp_max_1_2->fbc = lp_max_5_6->fbc = 15; + ilk_wm_max(dev, 1, pipes_active, sprites_enabled, + INTEL_DDB_PART_1_2, lp_max_1_2); + + /* 5/6 split only in single pipe config on IVB+ */ + if (INTEL_INFO(dev)->gen >= 7 && pipes_active <= 1) + ilk_wm_max(dev, 1, pipes_active, sprites_enabled, + INTEL_DDB_PART_5_6, lp_max_5_6); + else + *lp_max_5_6 = *lp_max_1_2; } static void hsw_compute_wm_results(struct drm_device *dev, -- cgit v1.2.3 From 240264f49edbe02eb96b472ae1c518cc413f9d01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 13:29:12 +0300 Subject: drm/i915: Pull some watermarks state into a separate structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a bunch of global state that needs to be considered when checking watermarks for validity. Move most of that to a new structure intel_wm_config, to avoid having to pass around so many variables. One notable thing left out is the DDB partitioning information, since we often anyway need to check the same watermarks against both 1/2 and 5/6 DDB partitioning layouts. v2: s/pipes_active/num_pipes_active Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 48 +++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a4c3c5497941..550f787a3b4a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2188,6 +2188,14 @@ struct hsw_wm_values { bool enable_fbc_wm; }; +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; + bool fbc_wm_enabled; +}; + /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. 
@@ -2281,8 +2289,7 @@ static unsigned int ilk_display_fifo_size(const struct drm_device *dev) /* Calculate the maximum primary/sprite plane watermark */ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, int level, - unsigned int num_pipes_active, - bool sprite_enabled, + const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, bool is_sprite) { @@ -2290,11 +2297,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, unsigned int max; /* if sprites aren't enabled, sprites get nothing */ - if (is_sprite && !sprite_enabled) + if (is_sprite && !config->sprites_enabled) return 0; /* HSW allows LP1+ watermarks even with multiple pipes */ - if (level == 0 || num_pipes_active > 1) { + if (level == 0 || config->num_pipes_active > 1) { fifo_size /= INTEL_INFO(dev)->num_pipes; /* @@ -2306,7 +2313,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, fifo_size /= 2; } - if (sprite_enabled) { + if (config->sprites_enabled) { /* level 0 is always calculated with 1:1 split */ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { if (is_sprite) @@ -2333,10 +2340,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, /* Calculate the maximum cursor plane watermark */ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, - int level, unsigned int num_pipes_active) + int level, + const struct intel_wm_config *config) { /* HSW LP1+ watermarks w/ multiple pipes */ - if (level > 0 && num_pipes_active > 1) + if (level > 0 && config->num_pipes_active > 1) return 64; /* otherwise just report max that registers can hold */ @@ -2355,16 +2363,13 @@ static unsigned int ilk_fbc_wm_max(void) static void ilk_wm_max(struct drm_device *dev, int level, - unsigned int num_pipes_active, - bool sprite_enabled, + const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, struct hsw_wm_maximums *max) { - max->pri = ilk_plane_wm_max(dev, level, num_pipes_active, - sprite_enabled, ddb_partitioning, false); - max->spr = ilk_plane_wm_max(dev, level, num_pipes_active, - sprite_enabled, ddb_partitioning, true); - max->cur = ilk_cursor_wm_max(dev, level, num_pipes_active); + max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(dev, level, config); max->fbc = ilk_fbc_wm_max(); } @@ -2614,7 +2619,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, struct drm_crtc *crtc; struct drm_plane *plane; enum pipe pipe; - int pipes_active = 0, sprites_enabled = 0; + struct intel_wm_config config = {}; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -2627,7 +2632,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, if (!p->active) continue; - pipes_active++; + config.num_pipes_active++; p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); @@ -2649,17 +2654,14 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel; p->spr_horiz_pixels = intel_plane->wm.horiz_pixels; - if (p->sprite_enabled) - sprites_enabled++; + config.sprites_enabled |= p->sprite_enabled; } - ilk_wm_max(dev, 1, pipes_active, sprites_enabled, - INTEL_DDB_PART_1_2, lp_max_1_2); + ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); /* 5/6 split only in single pipe config on IVB+ */ - if 
(INTEL_INFO(dev)->gen >= 7 && pipes_active <= 1) - ilk_wm_max(dev, 1, pipes_active, sprites_enabled, - INTEL_DDB_PART_5_6, lp_max_5_6); + if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) + ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6); else *lp_max_5_6 = *lp_max_1_2; } -- cgit v1.2.3 From c35426d2bc25b242ee2a9a7a1d62634be1e86bb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 13:29:50 +0300 Subject: drm/i915: Split plane watermark parameters into a separate struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Give a name to the plane watermark related data we have currently stored under intel_plane->wm. We also observe that this data is more or less the same that we have in the hsw_pipe_wm_parameters structure, so use it there as well. v2: Make pahole happier Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 14 +++++----- drivers/gpu/drm/i915/intel_pm.c | 57 +++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7df662bab280..3ea8e5fe4407 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -331,6 +331,13 @@ struct intel_crtc { bool pch_fifo_underrun_disabled; }; +struct intel_plane_wm_parameters { + uint32_t horiz_pixels; + uint8_t bytes_per_pixel; + bool enabled; + bool scaled; +}; + struct intel_plane { struct drm_plane base; int plane; @@ -349,12 +356,7 @@ struct intel_plane { * as the other pieces of the struct may not reflect the values we want * for the watermark calculations. Currently only Haswell uses this. */ - struct { - bool enabled; - bool scaled; - uint8_t bytes_per_pixel; - uint32_t horiz_pixels; - } wm; + struct intel_plane_wm_parameters wm; void (*update_plane)(struct drm_plane *plane, struct drm_framebuffer *fb, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 550f787a3b4a..ed772fecbb07 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2162,15 +2162,11 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, struct hsw_pipe_wm_parameters { bool active; - bool sprite_enabled; - uint8_t pri_bytes_per_pixel; - uint8_t spr_bytes_per_pixel; - uint8_t cur_bytes_per_pixel; - uint32_t pri_horiz_pixels; - uint32_t spr_horiz_pixels; - uint32_t cur_horiz_pixels; uint32_t pipe_htotal; uint32_t pixel_rate; + struct intel_plane_wm_parameters pri; + struct intel_plane_wm_parameters spr; + struct intel_plane_wm_parameters cur; }; struct hsw_wm_maximums { @@ -2206,12 +2202,11 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, { uint32_t method1, method2; - /* TODO: for now, assume the primary plane is always enabled. 
*/ - if (!params->active) + if (!params->active || !params->pri.enabled) return 0; method1 = ilk_wm_method1(params->pixel_rate, - params->pri_bytes_per_pixel, + params->pri.bytes_per_pixel, mem_value); if (!is_lp) @@ -2219,8 +2214,8 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, - params->pri_horiz_pixels, - params->pri_bytes_per_pixel, + params->pri.horiz_pixels, + params->pri.bytes_per_pixel, mem_value); return min(method1, method2); @@ -2235,16 +2230,16 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, { uint32_t method1, method2; - if (!params->active || !params->sprite_enabled) + if (!params->active || !params->spr.enabled) return 0; method1 = ilk_wm_method1(params->pixel_rate, - params->spr_bytes_per_pixel, + params->spr.bytes_per_pixel, mem_value); method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, - params->spr_horiz_pixels, - params->spr_bytes_per_pixel, + params->spr.horiz_pixels, + params->spr.bytes_per_pixel, mem_value); return min(method1, method2); } @@ -2256,13 +2251,13 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value) { - if (!params->active) + if (!params->active || !params->cur.enabled) return 0; return ilk_wm_method2(params->pixel_rate, params->pipe_htotal, - params->cur_horiz_pixels, - params->cur_bytes_per_pixel, + params->cur.horiz_pixels, + params->cur.bytes_per_pixel, mem_value); } @@ -2270,12 +2265,12 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, uint32_t pri_val) { - if (!params->active) + if (!params->active || !params->pri.enabled) return 0; return ilk_wm_fbc(pri_val, - params->pri_horiz_pixels, - params->pri_bytes_per_pixel); + params->pri.horiz_pixels, + params->pri.bytes_per_pixel); } static unsigned int ilk_display_fifo_size(const struct drm_device *dev) @@ -2636,11 +2631,14 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); - p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8; - p->cur_bytes_per_pixel = 4; - p->pri_horiz_pixels = + p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; + p->cur.bytes_per_pixel = 4; + p->pri.horiz_pixels = intel_crtc->config.requested_mode.hdisplay; - p->cur_horiz_pixels = 64; + p->cur.horiz_pixels = 64; + /* TODO: for now, assume primary and cursor planes are always enabled. 
*/ + p->pri.enabled = true; + p->cur.enabled = true; } list_for_each_entry(plane, &dev->mode_config.plane_list, head) { @@ -2650,11 +2648,10 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, pipe = intel_plane->pipe; p = ¶ms[pipe]; - p->sprite_enabled = intel_plane->wm.enabled; - p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel; - p->spr_horiz_pixels = intel_plane->wm.horiz_pixels; + p->spr = intel_plane->wm; - config.sprites_enabled |= p->sprite_enabled; + config.sprites_enabled |= p->spr.enabled; + config.sprites_scaled |= p->spr.scaled; } ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); -- cgit v1.2.3 From b39d53f624d50d1588933e0ab17f19a5f2da5d94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:09 +0300 Subject: drm/i915: Pass crtc to our update/disable_plane hooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're going to want to know which CRTC we're dealing with, so pass it down to the update/disable_plane hooks. Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 4 +++- drivers/gpu/drm/i915/intel_sprite.c | 21 ++++++++++++--------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3ea8e5fe4407..da394f354453 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -359,13 +359,15 @@ struct intel_plane { struct intel_plane_wm_parameters wm; void (*update_plane)(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h); - void (*disable_plane)(struct drm_plane *plane); + void (*disable_plane)(struct drm_plane *plane, + struct drm_crtc *crtc); int (*update_colorkey)(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key); void (*get_colorkey)(struct drm_plane *plane, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 5a36afb6ea03..d4e0592e3389 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -38,7 +38,8 @@ #include "i915_drv.h" static void -vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, +vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -140,7 +141,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, } static void -vlv_disable_plane(struct drm_plane *dplane) +vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -207,7 +208,8 @@ vlv_get_colorkey(struct drm_plane *dplane, } static void -ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -320,7 +322,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, } static void -ivb_disable_plane(struct drm_plane *plane) +ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { 
struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -400,7 +402,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) } static void -ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -488,7 +491,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, } static void -ilk_disable_plane(struct drm_plane *plane) +ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -823,11 +826,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, intel_enable_primary(crtc); if (visible) - intel_plane->update_plane(plane, fb, obj, + intel_plane->update_plane(plane, crtc, fb, obj, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); else - intel_plane->disable_plane(plane); + intel_plane->disable_plane(plane, crtc); if (disable_primary) intel_disable_primary(crtc); @@ -862,7 +865,7 @@ intel_disable_plane(struct drm_plane *plane) if (plane->crtc) intel_enable_primary(plane->crtc); - intel_plane->disable_plane(plane); + intel_plane->disable_plane(plane, plane->crtc); if (!intel_plane->obj) goto out; -- cgit v1.2.3 From 88a94a58a07267d979cc168c3e511b99f4164951 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 7 Aug 2013 13:30:23 +0300 Subject: drm/i915: Don't try to disable plane if it's already disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check plane->fb in intel_disable_plane() to determine if the plane is already disabled. If the plane has an fb, then it must also have a crtc, so we can drop the plane->crtc check and just call intel_enable_primary() directly. v2: WARN and bail if the plane doesn't have a crtc when it should Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d4e0592e3389..0a174d7e5854 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -863,8 +863,13 @@ intel_disable_plane(struct drm_plane *plane) struct intel_plane *intel_plane = to_intel_plane(plane); int ret = 0; - if (plane->crtc) - intel_enable_primary(plane->crtc); + if (!plane->fb) + return 0; + + if (WARN_ON(!plane->crtc)) + return -EINVAL; + + intel_enable_primary(plane->crtc); intel_plane->disable_plane(plane, plane->crtc); if (!intel_plane->obj) -- cgit v1.2.3 From adf3d35e4aced032f0449c6d69b0a90fea14692f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:11 +0300 Subject: drm/i915: Pass plane and crtc to intel_update_sprite_watermarks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're going to want to know the crtc in the watermark code to avoid doing more work than we have to. We should also pass the plane we're disabling so that we know where to stick our watermark parameters without having to go look the plane up. 
Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/intel_drv.h | 3 ++- drivers/gpu/drm/i915/intel_pm.c | 34 ++++++++++++++++------------------ drivers/gpu/drm/i915/intel_sprite.c | 8 ++++---- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2cfa21fbedce..550ad171628c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -359,7 +359,8 @@ struct drm_i915_display_funcs { struct dpll *match_clock, struct dpll *best_clock); void (*update_wm)(struct drm_device *dev); - void (*update_sprite_wm)(struct drm_device *dev, int pipe, + void (*update_sprite_wm)(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, bool enable, bool scaled); void (*modeset_global_resources)(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index da394f354453..caf8b8dfe17a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -716,7 +716,8 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port); /* For use by IVB LP watermark workaround in intel_sprite.c */ extern void intel_update_watermarks(struct drm_device *dev); -extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, +extern void intel_update_sprite_watermarks(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, bool enabled, bool scaled); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed772fecbb07..023e287da01f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2866,25 +2866,19 @@ static void haswell_update_wm(struct drm_device *dev) hsw_write_wm_values(dev_priv, best_results, partitioning); } -static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, +static void haswell_update_sprite_wm(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, bool enabled, bool scaled) { - struct drm_plane *plane; - - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_plane *intel_plane = to_intel_plane(plane); - if (intel_plane->pipe == pipe) { - intel_plane->wm.enabled = enabled; - intel_plane->wm.scaled = scaled; - intel_plane->wm.horiz_pixels = sprite_width; - intel_plane->wm.bytes_per_pixel = pixel_size; - break; - } - } + intel_plane->wm.enabled = enabled; + intel_plane->wm.scaled = scaled; + intel_plane->wm.horiz_pixels = sprite_width; + intel_plane->wm.bytes_per_pixel = pixel_size; - haswell_update_wm(dev); + haswell_update_wm(plane->dev); } static bool @@ -2963,11 +2957,14 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, return *sprite_wm > 0x3ff ? 
false : true; } -static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, +static void sandybridge_update_sprite_wm(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, bool enabled, bool scaled) { + struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_plane(plane)->pipe; int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */ u32 val; int sprite_wm, reg; @@ -3086,14 +3083,15 @@ void intel_update_watermarks(struct drm_device *dev) dev_priv->display.update_wm(dev); } -void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, +void intel_update_sprite_watermarks(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, bool enabled, bool scaled) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = plane->dev->dev_private; if (dev_priv->display.update_sprite_wm) - dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, + dev_priv->display.update_sprite_wm(plane, crtc, sprite_width, pixel_size, enabled, scaled); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0a174d7e5854..05742f7d7006 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -109,7 +109,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, sprctl |= SP_ENABLE; - intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, + intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -265,7 +265,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (IS_HASWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; - intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, + intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -340,7 +340,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) dev_priv->sprite_scaling_enabled &= ~(1 << pipe); - intel_update_sprite_watermarks(dev, pipe, 0, 0, false, false); + intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); /* potentially re-enable LP watermarks */ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) @@ -455,7 +455,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; - intel_update_sprite_watermarks(dev, pipe, src_w, pixel_size, true, + intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ -- cgit v1.2.3 From a95fd8cae06dadf4a3eb88c9c130e86c5b0c1723 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 6 Aug 2013 22:24:12 +0300 Subject: drm/i915: Always call intel_update_sprite_watermarks() when disabling a plane MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ILK and VLV codepaths didn't update sprite watermarks when disabling a sprite. Make them do that. 
Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 05742f7d7006..78b621cdd108 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -154,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) /* Activate double buffered register update */ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); POSTING_READ(SPSURF(pipe, plane)); + + intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); } static int @@ -504,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) /* Flush double buffered register updates */ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); POSTING_READ(DVSSURF(pipe)); + + intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); } static void -- cgit v1.2.3 From 6d2b888569d366beb4be72cacfde41adee2c25e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 7 Aug 2013 18:30:54 +0100 Subject: drm/i915: List objects allocated from stolen memory in debugfs I was curious as to what objects were currently allocated from stolen memory, and so exported it from debugfs. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 63 +++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a1f4c91fb112..1a87cc9fd899 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include "intel_drv.h" #include "intel_ringbuffer.h" @@ -188,6 +189,67 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } +static int obj_rank_by_stolen(void *priv, + struct list_head *A, struct list_head *B) +{ + struct drm_i915_gem_object *a = + container_of(A, struct drm_i915_gem_object, exec_list); + struct drm_i915_gem_object *b = + container_of(B, struct drm_i915_gem_object, exec_list); + + return a->stolen->start - b->stolen->start; +} + +static int i915_gem_stolen_list_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + size_t total_obj_size, total_gtt_size; + LIST_HEAD(stolen); + int count, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + total_obj_size = total_gtt_size = count = 0; + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (obj->stolen == NULL) + continue; + + list_add(&obj->exec_list, &stolen); + + total_obj_size += obj->base.size; + total_gtt_size += i915_gem_obj_ggtt_size(obj); + count++; + } + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + if (obj->stolen == NULL) + continue; + + list_add(&obj->exec_list, &stolen); + + total_obj_size += obj->base.size; + count++; + } + list_sort(NULL, &stolen, obj_rank_by_stolen); + seq_puts(m, "Stolen:\n"); + while (!list_empty(&stolen)) { + obj = list_first_entry(&stolen, typeof(*obj), exec_list); + seq_puts(m, " "); + describe_obj(m, obj); + seq_putc(m, '\n'); + list_del_init(&obj->exec_list); + } + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, 
total_gtt_size); + return 0; +} + #define count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ size += i915_gem_obj_ggtt_size(obj); \ @@ -2114,6 +2176,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, -- cgit v1.2.3 From 00fd78e5279aec3aa504307ff2db892d3efb555d Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Thu, 8 Aug 2013 22:19:12 +0200 Subject: drm: provide agp dummies for CONFIG_AGP=n We currently rely on gcc dead-code elimination so the drm_agp_* helpers are not called if drm_core_has_AGP() is false. That's ugly as hell so provide "static inline" dummies for the case that AGP is disabled. Fixes a build-regression introduced by: commit 28ec711cd427f8b61f73712a43b8100ba8ca933b Author: David Herrmann Date: Sat Jul 27 16:37:00 2013 +0200 drm/agp: move AGP cleanup paths to drm_agpsupport.c v2: switch #ifdef -> #if (spotted by Stephen) Cc: Laurent Pinchart Cc: Daniel Vetter Tested-by: Stephen Warren Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- include/drm/drmP.h | 49 +---------- include/drm/drm_agpsupport.h | 194 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 196 insertions(+), 47 deletions(-) create mode 100644 include/drm/drm_agpsupport.h diff --git a/include/drm/drmP.h b/include/drm/drmP.h index fba547368a20..3ecdde6274be 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -62,10 +62,8 @@ #endif #include #include -#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) #include #include -#endif #include #include #include @@ -1226,16 +1224,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev) return dev->driver->bus->get_irq(dev); } - -#if __OS_HAS_AGP -static inline int drm_core_has_AGP(struct drm_device *dev) -{ - return drm_core_check_feature(dev, DRIVER_USE_AGP); -} -#else -#define drm_core_has_AGP(dev) (0) -#endif - #if __OS_HAS_MTRR static inline int drm_core_has_MTRR(struct drm_device *dev) { @@ -1292,14 +1280,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Memory management support (drm_memory.h) */ #include -extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); -extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); -extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, - struct page **pages, - unsigned long num_pages, - uint32_t gtt_offset, - uint32_t type); -extern int drm_unbind_agp(DRM_AGP_MEM * handle); /* Misc. 
IOCTL support (drm_ioctl.h) */ extern int drm_irq_by_busid(struct drm_device *dev, void *data, @@ -1453,33 +1433,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* AGP/GART support (drm_agpsupport.h) */ -extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); -extern void drm_agp_destroy(struct drm_agp_head *agp); -extern void drm_agp_clear(struct drm_device *dev); -extern int drm_agp_acquire(struct drm_device *dev); -extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_release(struct drm_device *dev); -extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); + +#include /* Stub support (drm_stub.h) */ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h new file mode 100644 index 000000000000..a184eeee9c96 --- /dev/null +++ b/include/drm/drm_agpsupport.h @@ -0,0 +1,194 @@ +#ifndef _DRM_AGPSUPPORT_H_ +#define _DRM_AGPSUPPORT_H_ + +#include +#include +#include +#include +#include +#include + +#if __OS_HAS_AGP + +void drm_free_agp(DRM_AGP_MEM * handle, int pages); +int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); +int drm_unbind_agp(DRM_AGP_MEM * handle); +DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type); + +struct drm_agp_head *drm_agp_init(struct drm_device *dev); +void drm_agp_destroy(struct drm_agp_head *agp); +void drm_agp_clear(struct drm_device *dev); +int drm_agp_acquire(struct drm_device *dev); +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_release(struct drm_device *dev); +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_free(struct drm_device *dev, struct 
drm_agp_buffer *request); +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +static inline int drm_core_has_AGP(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_USE_AGP); +} + +#else /* __OS_HAS_AGP */ + +static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages) +{ +} + +static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) +{ + return -ENODEV; +} + +static inline int drm_unbind_agp(DRM_AGP_MEM * handle) +{ + return -ENODEV; +} + +static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type) +{ + return NULL; +} + +static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline void drm_agp_destroy(struct drm_agp_head *agp) +{ +} + +static inline void drm_agp_clear(struct drm_device *dev) +{ +} + +static inline int drm_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return -ENODEV; +} + +static inline int drm_core_has_AGP(struct drm_device *dev) +{ + return 0; +} + +#endif /* __OS_HAS_AGP */ + +#endif /* _DRM_AGPSUPPORT_H_ */ -- cgit v1.2.3 From a2367166fb200528d6fd43859e917e80ee034e16 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:53 +0100 Subject: drm/i915: Remove stale prototypes Signed-off-by: Damien Lespiau 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 8 -------- drivers/gpu/drm/i915/intel_drv.h | 2 -- 2 files changed, 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 550ad171628c..036b01636f78 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1676,7 +1676,6 @@ extern void intel_pm_init(struct drm_device *dev); extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_early_sanitize(struct drm_device *dev); extern void intel_uncore_init(struct drm_device *dev); -extern void intel_uncore_reset(struct drm_device *dev); extern void intel_uncore_clear_errors(struct drm_device *dev); extern void intel_uncore_check_errors(struct drm_device *dev); @@ -1844,9 +1843,6 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error) void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_i915_gem_object *obj); -int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); @@ -2035,8 +2031,6 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c */ -void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else @@ -2044,8 +2038,6 @@ int i915_verify_lists(struct drm_device *dev); #endif void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle); -void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index caf8b8dfe17a..d516c63fc94d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -649,12 +649,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector, extern void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old); -extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); -extern void intel_enable_clock_gating(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, -- cgit v1.2.3 From ac44bfac5b591c5f9b28b43f8e8ed08e9abf7f95 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:54 +0100 Subject: drm/i915: Remove i915_gem_object_check_coherency() This code was dead since: commit 432e58edc9de1d9c3d6a7b444b3c455b8f209a7d Author: Chris Wilson Date: Thu Nov 25 19:32:06 2010 +0000 drm/i915: Avoid allocation for execbuffer object list so just put it to rest for good. 
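For context, the helper removed below was already compiled out: it sits behind a WATCH_COHERENCY switch that defaults to 0, and the define itself is dropped by this patch. A minimal sketch of that compile-time gating pattern, using a made-up WATCH_FOO switch rather than the driver's real WATCH_* switches:

#define WATCH_FOO 0	/* hypothetical debug switch, off by default */

#if WATCH_FOO
/* Expensive consistency checks are compiled in only when the switch is 1. */
void check_foo(void *obj)
{
	/* ... walk the object and verify its state ... */
}
#else
/* With the switch left at 0, calls to check_foo() compile down to nothing. */
static inline void check_foo(void *obj) { }
#endif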
Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 -- drivers/gpu/drm/i915/i915_gem_debug.c | 69 ----------------------------------- 2 files changed, 72 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 036b01636f78..3480f3b22366 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -201,7 +201,6 @@ struct intel_ddi_plls { #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 -#define WATCH_COHERENCY 0 #define WATCH_LISTS 0 #define WATCH_GTT 0 @@ -2036,8 +2035,6 @@ int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif -void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, - int handle); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index bf945a39fbb1..bcdbafc6c985 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -116,72 +116,3 @@ i915_verify_lists(struct drm_device *dev) return warned = err; } #endif /* WATCH_INACTIVE */ - -#if WATCH_COHERENCY -void -i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) -{ - struct drm_device *dev = obj->base.dev; - int page; - uint32_t *gtt_mapping; - uint32_t *backing_map = NULL; - int bad_count = 0; - - DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", - __func__, obj, obj->gtt_offset, handle, - obj->size / 1024); - - gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, - obj->base.size); - if (gtt_mapping == NULL) { - DRM_ERROR("failed to map GTT space\n"); - return; - } - - for (page = 0; page < obj->size / PAGE_SIZE; page++) { - int i; - - backing_map = kmap_atomic(obj->pages[page]); - - if (backing_map == NULL) { - DRM_ERROR("failed to map backing page\n"); - goto out; - } - - for (i = 0; i < PAGE_SIZE / 4; i++) { - uint32_t cpuval = backing_map[i]; - uint32_t gttval = readl(gtt_mapping + - page * 1024 + i); - - if (cpuval != gttval) { - DRM_INFO("incoherent CPU vs GPU at 0x%08x: " - "0x%08x vs 0x%08x\n", - (int)(obj->gtt_offset + - page * PAGE_SIZE + i * 4), - cpuval, gttval); - if (bad_count++ >= 8) { - DRM_INFO("...\n"); - goto out; - } - } - } - kunmap_atomic(backing_map); - backing_map = NULL; - } - - out: - if (backing_map != NULL) - kunmap_atomic(backing_map); - iounmap(gtt_mapping); - - /* give syslog time to catch up */ - msleep(1); - - /* Directly flush the object, since we just loaded values with the CPU - * from the backing pages and we don't want to disturb the cache - * management that we're trying to observe. - */ - - i915_gem_clflush_object(obj); -} -#endif -- cgit v1.2.3 From c55651b39a1fad0a6f07692971249eb54febfd73 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:55 +0100 Subject: drm/i915: Fix #endif comment Did you say OCD? 
Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index bcdbafc6c985..775d506b3208 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -115,4 +115,4 @@ i915_verify_lists(struct drm_device *dev) return warned = err; } -#endif /* WATCH_INACTIVE */ +#endif /* WATCH_LIST */ -- cgit v1.2.3 From a658b5d20de78435a971f26d56a765fb40f88e16 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:56 +0100 Subject: drm/i915: Make i915_hangcheck_elapsed() static Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_irq.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3480f3b22366..6abf8f9d9b14 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1665,7 +1665,6 @@ extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ void i915_queue_hangcheck(struct drm_device *dev); -void i915_hangcheck_elapsed(unsigned long data); void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6a1c207a296b..8a77faf4927d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1869,7 +1869,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) * we kick the ring. If we see no progress on three subsequent calls * we assume chip is wedged and try to fix it by resetting the chip. */ -void i915_hangcheck_elapsed(unsigned long data) +static void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; -- cgit v1.2.3 From 9237329d83b04cfc7d9fc4608e9db84f32280dc4 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:57 +0100 Subject: drm/i915: Make intel_encoder_dpms() static And also fix a small typo in the intel_encoder_dpms() comment. Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- drivers/gpu/drm/i915/intel_drv.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4127ad2890f3..1f9766a4238e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3889,10 +3889,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder) kfree(intel_encoder); } -/* Simple dpms helper for encodres with just one connector, no cloning and only +/* Simple dpms helper for encoders with just one connector, no cloning and only * one kind of off state. It clamps all !ON modes to fully OFF and changes the * state of the entire output pipe. 
*/ -void intel_encoder_dpms(struct intel_encoder *encoder, int mode) +static void intel_encoder_dpms(struct intel_encoder *encoder, int mode) { if (mode == DRM_MODE_DPMS_ON) { encoder->connectors_active = true; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d516c63fc94d..09c919658735 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -583,7 +583,6 @@ extern void intel_crtc_restore_mode(struct drm_crtc *crtc); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_crtc_update_dpms(struct drm_crtc *crtc); extern void intel_encoder_destroy(struct drm_encoder *encoder); -extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); extern void intel_connector_dpms(struct drm_connector *, int mode); extern bool intel_connector_get_hw_state(struct intel_connector *connector); extern void intel_modeset_check_state(struct drm_device *dev); -- cgit v1.2.3 From 1414f6c0497acc9ca73f492d1cf2a2b87bed950b Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:58 +0100 Subject: drm/i915: Remove intel_modeset_disable() Caught by the dead code police! Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 10 ---------- drivers/gpu/drm/i915/intel_drv.h | 1 - 2 files changed, 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1f9766a4238e..1fd891554e5f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3871,16 +3871,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) } } -void intel_modeset_disable(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->enabled) - intel_crtc_disable(crtc); - } -} - void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 09c919658735..a70a0d04a0e5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -578,7 +578,6 @@ struct intel_set_config { extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb); -extern void intel_modeset_disable(struct drm_device *dev); extern void intel_crtc_restore_mode(struct drm_crtc *crtc); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_crtc_update_dpms(struct drm_crtc *crtc); -- cgit v1.2.3 From e7457a9a333a95e51dd77515eea326a181f968bc Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 Aug 2013 22:28:59 +0100 Subject: drm/i915: Make intel_set_mode() static Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 10 +++++++--- drivers/gpu/drm/i915/intel_drv.h | 2 -- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1fd891554e5f..2972a0d68717 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -50,6 +50,10 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, static void ironlake_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config); +static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *old_fb); + + typedef struct { int min, max; } 
intel_range_t; @@ -8732,9 +8736,9 @@ out: return ret; } -int intel_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) +static int intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) { int ret; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a70a0d04a0e5..01455aa8b8bb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -576,8 +576,6 @@ struct intel_set_config { bool mode_changed; }; -extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *old_fb); extern void intel_crtc_restore_mode(struct drm_crtc *crtc); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_crtc_update_dpms(struct drm_crtc *crtc); -- cgit v1.2.3 From 58e73e15708856540056050ae0798c322e43af18 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 9 Aug 2013 12:44:11 +0300 Subject: drm/i915: unbreak i915_gem_object_ggtt_unbind() There is an extra semi-colon here so we just leak and never unbind anything. This regression has been introduced in commit 07fe0b12800d4752d729d4122c01f41f80a5ba5a Author: Ben Widawsky Date: Wed Jul 31 17:00:10 2013 -0700 drm/i915: plumb VM into bind/unbind code Cc: Ben Widawsky Signed-off-by: Dan Carpenter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f5d389a20024..79cef3c9b1ad 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2664,7 +2664,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct i915_address_space *ggtt = &dev_priv->gtt.base; - if (!i915_gem_obj_ggtt_bound(obj)); + if (!i915_gem_obj_ggtt_bound(obj)) return 0; if (obj->pin_count) -- cgit v1.2.3 From 16e54061ecc81df66e80ce96b3f91ae56065ed9e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 9 Aug 2013 13:07:31 +0300 Subject: drm/i915: fix a limit check in hsw_compute_wm_results() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The '!' here was not intended. Since '!' has higher precedence than compare, it means the check is never true. This regression was introduced in commit 71fff20ff1bb790f4defe0c880e028581ffab420 Author: Ville Syrjälä Date: Tue Aug 6 22:24:03 2013 +0300 drm/i915: Kill fbc_enable from hsw_lp_wm_results Signed-off-by: Dan Carpenter Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 023e287da01f..6643069eccdb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2685,7 +2685,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, * a WM level. 
*/ results->enable_fbc_wm = true; for (level = 1; level <= max_level; level++) { - if (!lp_results[level - 1].fbc_val > lp_maximums->fbc) { + if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { results->enable_fbc_wm = false; lp_results[level - 1].fbc_val = 0; } -- cgit v1.2.3 From 6f6005a52b79c2b2e3d58d8ab63791c378ebf82c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 9 Aug 2013 09:34:35 -0700 Subject: drm/i915: expose HDMI connectors on port C on BYT Ryan noticed that on his board, HDMI was wired up to port C but not exposed by the kernel, which had only expected DP on that port. Fix that up by enumerating both ports if possible. Tested-by: "Matsumura, Ryan" Acked-by: Chris Wilson Signed-off-by: Jesse Barnes [danvet: Fix up the whitespace fail. Tsk.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2972a0d68717..370c902fa629 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9335,8 +9335,13 @@ static void intel_setup_outputs(struct drm_device *dev) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ - if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { + intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, + PORT_C); + if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, + PORT_C); + } if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, -- cgit v1.2.3 From 5c536613d8ebda3da0448550d0a997651a6048e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 9 Aug 2013 18:02:09 +0300 Subject: drm/i915: Fix FB WM for HSW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to a misplaced memset(), we never actually enabled the FBC WM on HSW. Move the memset() to happen a bit earlier, so that it won't clobber results->enable_fbc_wm. Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6643069eccdb..3ac5fe9d428a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2681,6 +2681,8 @@ static void hsw_compute_wm_results(struct drm_device *dev, break; max_level = level - 1; + memset(results, 0, sizeof(*results)); + /* The spec says it is preferred to disable FBC WMs instead of disabling * a WM level. */ results->enable_fbc_wm = true; @@ -2691,7 +2693,6 @@ static void hsw_compute_wm_results(struct drm_device *dev, } } - memset(results, 0, sizeof(*results)); for (wm_lp = 1; wm_lp <= 3; wm_lp++) { const struct intel_wm_level *r; -- cgit v1.2.3 From 6811b1bea98462e228fef2172c36f1543ac156fe Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 3 Jun 2013 10:53:48 +0200 Subject: drm/rcar-du: Add missing alpha plane register definitions Several alpha plane register definitions are missing, add them. 
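The additions below are plain MMIO offsets for the alpha plane block. As a rough usage sketch only (the coordinates are invented and channel offset handling is omitted), such a definition would be programmed through the driver's register helper, rcar_du_write(), which appears elsewhere in this series:

/* Illustrative only: program the alpha plane display position and size.
 * The register names come from the hunk below; the values are made up.
 */
static void rcar_du_ap_example(struct rcar_du_device *rcdu)
{
	rcar_du_write(rcdu, APnDPXR, 0);	/* display position X */
	rcar_du_write(rcdu, APnDPYR, 0);	/* display position Y */
	rcar_du_write(rcdu, APnDSXR, 1280);	/* display size X */
	rcar_du_write(rcdu, APnDSYR, 720);	/* display size Y */
}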
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 69f21f19b51c..3aba27ffc065 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -349,10 +349,25 @@ #define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */ #define APnMWR 0x0a104 + +#define APnDSXR 0x0a110 +#define APnDSYR 0x0a114 +#define APnDPXR 0x0a118 +#define APnDPYR 0x0a11c + #define APnDSA0R 0x0a120 #define APnDSA1R 0x0a124 #define APnDSA2R 0x0a128 + +#define APnSPXR 0x0a130 +#define APnSPYR 0x0a134 +#define APnWASPR 0x0a138 +#define APnWAMWR 0x0a13c + +#define APnBTR 0x0a140 + #define APnMLR 0x0a150 +#define APnSWAPR 0x0a180 /* ----------------------------------------------------------------------------- * Display Capture Registers -- cgit v1.2.3 From d5b6dcc45950bc727f6a02d0ee68c99d0b6052ea Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 02:29:07 +0200 Subject: drm/rcar-du: Use devm_ioremap_resource() Replace the devm_request_mem_region() and devm_ioremap_nocache() calls with devm_ioremap_resource(). Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index dc0fe09b2ba1..f776b1c66ef4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -107,7 +107,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) struct platform_device *pdev = dev->platformdev; struct rcar_du_platform_data *pdata = pdev->dev.platform_data; struct rcar_du_device *rcdu; - struct resource *ioarea; struct resource *mem; int ret; @@ -129,24 +128,9 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) /* I/O resources and clocks */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (mem == NULL) { - dev_err(&pdev->dev, "failed to get memory resource\n"); - return -EINVAL; - } - - ioarea = devm_request_mem_region(&pdev->dev, mem->start, - resource_size(mem), pdev->name); - if (ioarea == NULL) { - dev_err(&pdev->dev, "failed to request memory region\n"); - return -EBUSY; - } - - rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start, - resource_size(ioarea)); - if (rcdu->mmio == NULL) { - dev_err(&pdev->dev, "failed to remap memory resource\n"); - return -ENOMEM; - } + rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rcdu->mmio)) + return PTR_ERR(rcdu->mmio); rcdu->clock = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rcdu->clock)) { -- cgit v1.2.3 From 481d342e3500e71a88cac79a6fab7b62f7203c7c Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 14 Jun 2013 13:38:33 +0200 Subject: drm/rcar-du: Add platform module device table The platform device id driver data field points to a device information structure that only contains a (currently empty) features field for now. Support for additional model-dependent features will be added later. Only the R8A7779 variant is currently supported. 
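The features word added here is meant to be tested through the new rcar_du_has() helper. A small sketch of the intended consumption pattern, with a made-up feature bit (a real one is introduced by a later patch in this series):

#define RCAR_DU_FEATURE_EXAMPLE	(1 << 31)	/* hypothetical bit, illustration only */

static void rcar_du_report_example_feature(struct rcar_du_device *rcdu)
{
	/* Gate model-specific code on the info->features word that the
	 * platform_device_id driver_data points to.
	 */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_EXAMPLE))
		dev_info(rcdu->dev, "example feature present on this DU model\n");
}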
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 13 +++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 15 +++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index f776b1c66ef4..bb7193d79dfe 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -123,6 +123,7 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) rcdu->dev = &pdev->dev; rcdu->pdata = pdata; + rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data; rcdu->ddev = dev; dev->dev_private = rcdu; @@ -297,6 +298,17 @@ static int rcar_du_remove(struct platform_device *pdev) return 0; } +static const struct rcar_du_device_info rcar_du_r8a7779_info = { + .features = 0, +}; + +static const struct platform_device_id rcar_du_id_table[] = { + { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info }, + { } +}; + +MODULE_DEVICE_TABLE(platform, rcar_du_id_table); + static struct platform_driver rcar_du_platform_driver = { .probe = rcar_du_probe, .remove = rcar_du_remove, @@ -305,6 +317,7 @@ static struct platform_driver rcar_du_platform_driver = { .name = "rcar-du", .pm = &rcar_du_pm_ops, }, + .id_table = rcar_du_id_table, }; module_platform_driver(rcar_du_platform_driver); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 193cc59d495c..06dbf4ff139c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -25,9 +25,18 @@ struct clk; struct device; struct drm_device; +/* + * struct rcar_du_device_info - DU model-specific information + * @features: device features (RCAR_DU_FEATURE_*) + */ +struct rcar_du_device_info { + unsigned int features; +}; + struct rcar_du_device { struct device *dev; const struct rcar_du_platform_data *pdata; + const struct rcar_du_device_info *info; void __iomem *mmio; struct clk *clock; @@ -50,6 +59,12 @@ struct rcar_du_device { } planes; }; +static inline bool rcar_du_has(struct rcar_du_device *rcdu, + unsigned int feature) +{ + return rcdu->info->features & feature; +} + int rcar_du_get(struct rcar_du_device *rcdu); void rcar_du_put(struct rcar_du_device *rcdu); -- cgit v1.2.3 From f66ee304ae8990bd31fa639b775a840d6757d746 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 14 Jun 2013 14:15:01 +0200 Subject: drm/rcar-du: Support per-CRTC clock and IRQ Some of the DU revisions use one clock and IRQ per CRTC instead of one clock and IRQ per device. Retrieve the correct clock and register the correct IRQ for each CRTC. 
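The core of the change is the clock lookup: DU models flagged with RCAR_DU_FEATURE_CRTC_IRQ_CLOCK expose one clock per CRTC, named "du.0", "du.1", and so on, while older models expose a single unnamed clock. A simplified sketch of that lookup, mirroring the hunk below with error handling left to the caller:

static struct clk *rcar_du_crtc_example_get_clock(struct rcar_du_device *rcdu,
						  unsigned int index)
{
	char clk_name[16];

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		/* Per-CRTC clock: look it up by its "du.<index>" name. */
		snprintf(clk_name, sizeof(clk_name), "du.%u", index);
		return devm_clk_get(rcdu->dev, clk_name);
	}

	/* Single device-wide clock on older models. */
	return devm_clk_get(rcdu->dev, NULL);
}

The same feature flag also selects between a dedicated IRQ per CRTC and a shared device IRQ registered with IRQF_SHARED, as the CRTC code in the patch shows.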
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 120 +++++++++++++++++++++++++-------- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 2 +- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 52 +++----------- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 3 +- 4 files changed, 103 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 24183fb93592..aefc8a0cbcbc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -69,6 +69,30 @@ static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); } +static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc) +{ + struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + int ret; + + ret = clk_prepare_enable(rcrtc->clock); + if (ret < 0) + return ret; + + ret = rcar_du_get(rcdu); + if (ret < 0) + clk_disable_unprepare(rcrtc->clock); + + return ret; +} + +static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) +{ + struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + + rcar_du_put(rcdu); + clk_disable_unprepare(rcrtc->clock); +} + static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { struct drm_crtc *crtc = &rcrtc->crtc; @@ -79,7 +103,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) u32 div; /* Dot clock */ - clk = clk_get_rate(rcdu->clock); + clk = clk_get_rate(rcrtc->clock); div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000); div = clamp(div, 1U, 64U) - 1; @@ -313,20 +337,16 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; - rcar_du_crtc_stop(rcrtc); - rcar_du_put(rcdu); + rcar_du_crtc_put(rcrtc); } void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; - if (rcrtc->dpms != DRM_MODE_DPMS_ON) return; - rcar_du_get(rcdu); + rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); } @@ -340,18 +360,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode) { - struct rcar_du_device *rcdu = crtc->dev->dev_private; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); if (rcrtc->dpms == mode) return; if (mode == DRM_MODE_DPMS_ON) { - rcar_du_get(rcdu); + rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); } else { rcar_du_crtc_stop(rcrtc); - rcar_du_put(rcdu); + rcar_du_crtc_put(rcrtc); } rcrtc->dpms = mode; @@ -367,13 +386,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc, static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) { - struct rcar_du_device *rcdu = crtc->dev->dev_private; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); /* We need to access the hardware during mode set, acquire a reference - * to the DU. + * to the CRTC. */ - rcar_du_get(rcdu); + rcar_du_crtc_get(rcrtc); /* Stop the CRTC and release the plane. Force the DPMS mode to off as a * result. @@ -423,10 +441,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc, error: /* There's no rollback/abort operation to clean up in case of error. We - * thus need to release the reference to the DU acquired in prepare() + * thus need to release the reference to the CRTC acquired in prepare() * here. 
*/ - rcar_du_put(rcdu); + rcar_du_crtc_put(rcrtc); return ret; } @@ -514,6 +532,24 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) drm_vblank_put(dev, rcrtc->index); } +static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) +{ + struct rcar_du_crtc *rcrtc = arg; + irqreturn_t ret = IRQ_NONE; + u32 status; + + status = rcar_du_crtc_read(rcrtc, DSSR); + rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); + + if (status & DSSR_VBK) { + drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); + rcar_du_crtc_finish_page_flip(rcrtc); + ret = IRQ_HANDLED; + } + + return ret; +} + static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) @@ -551,10 +587,29 @@ static const struct drm_crtc_funcs crtc_funcs = { int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) { + struct platform_device *pdev = to_platform_device(rcdu->dev); struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; struct drm_crtc *crtc = &rcrtc->crtc; + unsigned int irqflags; + char clk_name[5]; + char *name; + int irq; int ret; + /* Get the CRTC clock. */ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) { + sprintf(clk_name, "du.%u", index); + name = clk_name; + } else { + name = NULL; + } + + rcrtc->clock = devm_clk_get(rcdu->dev, name); + if (IS_ERR(rcrtc->clock)) { + dev_err(rcdu->dev, "no clock for CRTC %u\n", index); + return PTR_ERR(rcrtc->clock); + } + rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0; rcrtc->index = index; rcrtc->dpms = DRM_MODE_DPMS_OFF; @@ -568,6 +623,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) drm_crtc_helper_add(crtc, &crtc_helper_funcs); + /* Register the interrupt handler. */ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) { + irq = platform_get_irq(pdev, index); + irqflags = 0; + } else { + irq = platform_get_irq(pdev, 0); + irqflags = IRQF_SHARED; + } + + if (irq < 0) { + dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index); + return ret; + } + + ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags, + dev_name(rcdu->dev), rcrtc); + if (ret < 0) { + dev_err(rcdu->dev, + "failed to register IRQ for CRTC %u\n", index); + return ret; + } + return 0; } @@ -580,16 +657,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable) rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); } } - -void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc) -{ - u32 status; - - status = rcar_du_crtc_read(rcrtc, DSSR); - rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); - - if (status & DSSR_VBK) { - drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); - rcar_du_crtc_finish_page_flip(rcrtc); - } -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 2a0365bcbd14..5b69e98a3b92 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -25,6 +25,7 @@ struct rcar_du_plane; struct rcar_du_crtc { struct drm_crtc crtc; + struct clk *clock; unsigned int mmio_offset; unsigned int index; bool started; @@ -38,7 +39,6 @@ struct rcar_du_crtc { int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index); void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); -void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, struct drm_file *file); void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 
bb7193d79dfe..5f82e046f83e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -35,8 +35,8 @@ /* * rcar_du_get - Acquire a reference to the DU * - * Acquiring a reference enables the device clock and setup core registers. A - * reference must be held before accessing any hardware registers. + * Acquiring the first reference setups core registers. A reference must be + * held before accessing any hardware registers. * * This function must be called with the DRM mode_config lock held. * @@ -44,16 +44,9 @@ */ int rcar_du_get(struct rcar_du_device *rcdu) { - int ret; - if (rcdu->use_count) goto done; - /* Enable clocks before accessing the hardware. */ - ret = clk_prepare_enable(rcdu->clock); - if (ret < 0) - return ret; - /* Enable extended features */ rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE); rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G); @@ -74,16 +67,11 @@ done: /* * rcar_du_put - Release a reference to the DU * - * Releasing the last reference disables the device clock. - * * This function must be called with the DRM mode_config lock held. */ void rcar_du_put(struct rcar_du_device *rcdu) { - if (--rcdu->use_count) - return; - - clk_disable_unprepare(rcdu->clock); + --rcdu->use_count; } /* ----------------------------------------------------------------------------- @@ -95,8 +83,8 @@ static int rcar_du_unload(struct drm_device *dev) drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); - drm_irq_uninstall(dev); + dev->irq_enabled = 0; dev->dev_private = NULL; return 0; @@ -127,18 +115,12 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) rcdu->ddev = dev; dev->dev_private = rcdu; - /* I/O resources and clocks */ + /* I/O resources */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(rcdu->mmio)) return PTR_ERR(rcdu->mmio); - rcdu->clock = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(rcdu->clock)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return -ENOENT; - } - /* DRM/KMS objects */ ret = rcar_du_modeset_init(rcdu); if (ret < 0) { @@ -146,18 +128,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) goto done; } - /* IRQ and vblank handling */ + /* vblank handling */ ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); if (ret < 0) { dev_err(&pdev->dev, "failed to initialize vblank\n"); goto done; } - ret = drm_irq_install(dev); - if (ret < 0) { - dev_err(&pdev->dev, "failed to install IRQ handler\n"); - goto done; - } + dev->irq_enabled = 1; platform_set_drvdata(pdev, rcdu); @@ -177,18 +155,6 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); } -static irqreturn_t rcar_du_irq(int irq, void *arg) -{ - struct drm_device *dev = arg; - struct rcar_du_device *rcdu = dev->dev_private; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) - rcar_du_crtc_irq(&rcdu->crtcs[i]); - - return IRQ_HANDLED; -} - static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) { struct rcar_du_device *rcdu = dev->dev_private; @@ -221,12 +187,10 @@ static const struct file_operations rcar_du_fops = { }; static struct drm_driver rcar_du_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET - | DRIVER_PRIME, + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, .load = rcar_du_load, .unload = rcar_du_unload, .preclose = rcar_du_preclose, - .irq_handler = rcar_du_irq, 
.get_vblank_counter = drm_vblank_count, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 06dbf4ff139c..7d2320fb948d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -25,6 +25,8 @@ struct clk; struct device; struct drm_device; +#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ + /* * struct rcar_du_device_info - DU model-specific information * @features: device features (RCAR_DU_FEATURE_*) @@ -39,7 +41,6 @@ struct rcar_du_device { const struct rcar_du_device_info *info; void __iomem *mmio; - struct clk *clock; unsigned int use_count; struct drm_device *ddev; -- cgit v1.2.3 From 9e7db06d3ac0ffcd866e5b7114f9a7ba12f7b6ac Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 14 Jun 2013 20:54:16 +0200 Subject: drm/rcar-du: Clarify comment regarding plane Y source coordinate The R8A7790 DU documentation contains further information regarding the plane Y source coordinate. Update the comment accordingly. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index a65f81ddf51d..38ebd20e4e8d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -103,9 +103,12 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane) struct rcar_du_device *rcdu = plane->dev; unsigned int index = plane->hwindex; - /* According to the datasheet the Y position is expressed in raster line - * units. However, 32bpp formats seem to require a doubled Y position - * value. Similarly, for the second plane, NV12 and NV21 formats seem to + /* The Y position is expressed in raster line units and must be doubled + * for 32bpp formats, according to the R8A7790 datasheet. No mention of + * doubling the Y position is found in the R8A7779 datasheet, but the + * rule seems to apply there as well. + * + * Similarly, for the second plane, NV12 and NV21 formats seem to * require a halved Y position value. */ rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); -- cgit v1.2.3 From 56c5dd00f8db27a429647b14c8c309bd5d9c1d15 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 15 Jun 2013 14:21:51 +0200 Subject: drm/rcar-du: Split LVDS encoder and connector This prepares for the encoders rework. 
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Makefile | 1 + drivers/gpu/drm/rcar-du/rcar_du_lvds.c | 120 +-------------------------- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 130 ++++++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h | 25 ++++++ 4 files changed, 158 insertions(+), 118 deletions(-) create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 7333c0094015..5def510599b0 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -2,6 +2,7 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_drv.o \ rcar_du_kms.o \ rcar_du_lvds.o \ + rcar_du_lvdscon.o \ rcar_du_plane.o \ rcar_du_vga.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c index 7aefe7267e1d..82e515741f89 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c @@ -1,5 +1,5 @@ /* - * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector + * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder * * Copyright (C) 2013 Renesas Corporation * @@ -18,123 +18,7 @@ #include "rcar_du_drv.h" #include "rcar_du_kms.h" #include "rcar_du_lvds.h" - -struct rcar_du_lvds_connector { - struct rcar_du_connector connector; - - const struct rcar_du_panel_data *panel; -}; - -#define to_rcar_lvds_connector(c) \ - container_of(c, struct rcar_du_lvds_connector, connector.connector) - -/* ----------------------------------------------------------------------------- - * Connector - */ - -static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) -{ - struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector); - struct drm_display_mode *mode; - - mode = drm_mode_create(connector->dev); - if (mode == NULL) - return 0; - - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; - mode->clock = lvdscon->panel->mode.clock; - mode->hdisplay = lvdscon->panel->mode.hdisplay; - mode->hsync_start = lvdscon->panel->mode.hsync_start; - mode->hsync_end = lvdscon->panel->mode.hsync_end; - mode->htotal = lvdscon->panel->mode.htotal; - mode->vdisplay = lvdscon->panel->mode.vdisplay; - mode->vsync_start = lvdscon->panel->mode.vsync_start; - mode->vsync_end = lvdscon->panel->mode.vsync_end; - mode->vtotal = lvdscon->panel->mode.vtotal; - mode->flags = lvdscon->panel->mode.flags; - - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - return 1; -} - -static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = rcar_du_lvds_connector_get_modes, - .mode_valid = rcar_du_lvds_connector_mode_valid, - .best_encoder = rcar_du_connector_best_encoder, -}; - -static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) -{ - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -} - -static enum drm_connector_status -rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = rcar_du_lvds_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_lvds_connector_destroy, -}; - -static int rcar_du_lvds_connector_init(struct 
rcar_du_device *rcdu, - struct rcar_du_encoder *renc, - const struct rcar_du_panel_data *panel) -{ - struct rcar_du_lvds_connector *lvdscon; - struct drm_connector *connector; - int ret; - - lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL); - if (lvdscon == NULL) - return -ENOMEM; - - lvdscon->panel = panel; - - connector = &lvdscon->connector.connector; - connector->display_info.width_mm = panel->width_mm; - connector->display_info.height_mm = panel->height_mm; - - ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - if (ret < 0) - return ret; - - drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); - if (ret < 0) - return ret; - - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - drm_object_property_set_value(&connector->base, - rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); - - ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); - if (ret < 0) - return ret; - - connector->encoder = &renc->encoder; - lvdscon->connector.encoder = renc; - - return 0; -} - -/* ----------------------------------------------------------------------------- - * Encoder - */ +#include "rcar_du_lvdscon.h" static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c new file mode 100644 index 000000000000..6cfcc9438c68 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -0,0 +1,130 @@ +/* + * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "rcar_du_drv.h" +#include "rcar_du_kms.h" +#include "rcar_du_lvdscon.h" + +struct rcar_du_lvds_connector { + struct rcar_du_connector connector; + + const struct rcar_du_panel_data *panel; +}; + +#define to_rcar_lvds_connector(c) \ + container_of(c, struct rcar_du_lvds_connector, connector.connector) + +static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) +{ + struct rcar_du_lvds_connector *lvdscon = + to_rcar_lvds_connector(connector); + struct drm_display_mode *mode; + + mode = drm_mode_create(connector->dev); + if (mode == NULL) + return 0; + + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; + mode->clock = lvdscon->panel->mode.clock; + mode->hdisplay = lvdscon->panel->mode.hdisplay; + mode->hsync_start = lvdscon->panel->mode.hsync_start; + mode->hsync_end = lvdscon->panel->mode.hsync_end; + mode->htotal = lvdscon->panel->mode.htotal; + mode->vdisplay = lvdscon->panel->mode.vdisplay; + mode->vsync_start = lvdscon->panel->mode.vsync_start; + mode->vsync_end = lvdscon->panel->mode.vsync_end; + mode->vtotal = lvdscon->panel->mode.vtotal; + mode->flags = lvdscon->panel->mode.flags; + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return 1; +} + +static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs connector_helper_funcs = { + .get_modes = rcar_du_lvds_connector_get_modes, + .mode_valid = rcar_du_lvds_connector_mode_valid, + .best_encoder = rcar_du_connector_best_encoder, +}; + +static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +} + +static enum drm_connector_status +rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static const struct drm_connector_funcs connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = rcar_du_lvds_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = rcar_du_lvds_connector_destroy, +}; + +int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, + struct rcar_du_encoder *renc, + const struct rcar_du_panel_data *panel) +{ + struct rcar_du_lvds_connector *lvdscon; + struct drm_connector *connector; + int ret; + + lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL); + if (lvdscon == NULL) + return -ENOMEM; + + lvdscon->panel = panel; + + connector = &lvdscon->connector.connector; + connector->display_info.width_mm = panel->width_mm; + connector->display_info.height_mm = panel->height_mm; + + ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + if (ret < 0) + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); + ret = drm_sysfs_connector_add(connector); + if (ret < 0) + return ret; + + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_object_property_set_value(&connector->base, + rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); + + ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); + if (ret < 0) + return ret; + + connector->encoder = &renc->encoder; + lvdscon->connector.encoder = renc; + + return 0; +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h new file mode 100644 index 000000000000..bff8683699ca --- /dev/null +++ 
b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h @@ -0,0 +1,25 @@ +/* + * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __RCAR_DU_LVDSCON_H__ +#define __RCAR_DU_LVDSCON_H__ + +struct rcar_du_device; +struct rcar_du_encoder; +struct rcar_du_panel_data; + +int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, + struct rcar_du_encoder *renc, + const struct rcar_du_panel_data *panel); + +#endif /* __RCAR_DU_LVDSCON_H__ */ -- cgit v1.2.3 From 9e8be27233c1e98b06edeb801640b1f96b09e466 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 15 Jun 2013 14:21:51 +0200 Subject: drm/rcar-du: Split VGA encoder and connector This prepares for the encoders rework. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Makefile | 3 +- drivers/gpu/drm/rcar-du/rcar_du_vga.c | 86 +---------------------------- drivers/gpu/drm/rcar-du/rcar_du_vga.h | 2 +- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 95 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_vgacon.h | 23 ++++++++ 5 files changed, 123 insertions(+), 86 deletions(-) create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_vgacon.c create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_vgacon.h diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 5def510599b0..45a8479aed0d 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -4,6 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_lvds.o \ rcar_du_lvdscon.o \ rcar_du_plane.o \ - rcar_du_vga.o + rcar_du_vga.o \ + rcar_du_vgacon.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vga.c index 327289ec380d..369ab32d5652 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.c @@ -1,5 +1,5 @@ /* - * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector + * rcar_du_vga.c -- R-Car Display Unit VGA DAC * * Copyright (C) 2013 Renesas Corporation * @@ -18,89 +18,7 @@ #include "rcar_du_drv.h" #include "rcar_du_kms.h" #include "rcar_du_vga.h" - -/* ----------------------------------------------------------------------------- - * Connector - */ - -static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) -{ - return 0; -} - -static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = rcar_du_vga_connector_get_modes, - .mode_valid = rcar_du_vga_connector_mode_valid, - .best_encoder = rcar_du_connector_best_encoder, -}; - -static void rcar_du_vga_connector_destroy(struct drm_connector *connector) -{ - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -} - -static enum drm_connector_status -rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) -{ - return connector_status_unknown; -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = rcar_du_vga_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = 
rcar_du_vga_connector_destroy, -}; - -static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc) -{ - struct rcar_du_connector *rcon; - struct drm_connector *connector; - int ret; - - rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL); - if (rcon == NULL) - return -ENOMEM; - - connector = &rcon->connector; - connector->display_info.width_mm = 0; - connector->display_info.height_mm = 0; - - ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, - DRM_MODE_CONNECTOR_VGA); - if (ret < 0) - return ret; - - drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); - if (ret < 0) - return ret; - - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - drm_object_property_set_value(&connector->base, - rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); - - ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); - if (ret < 0) - return ret; - - connector->encoder = &renc->encoder; - rcon->encoder = renc; - - return 0; -} - -/* ----------------------------------------------------------------------------- - * Encoder - */ +#include "rcar_du_vgacon.h" static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vga.h index 66b4d2d7190d..b969b2075b57 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.h @@ -1,5 +1,5 @@ /* - * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector + * rcar_du_vga.h -- R-Car Display Unit VGA DAC * * Copyright (C) 2013 Renesas Corporation * diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c new file mode 100644 index 000000000000..2ee320333615 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -0,0 +1,95 @@ +/* + * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "rcar_du_drv.h" +#include "rcar_du_kms.h" +#include "rcar_du_vgacon.h" + +static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) +{ + return 0; +} + +static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs connector_helper_funcs = { + .get_modes = rcar_du_vga_connector_get_modes, + .mode_valid = rcar_du_vga_connector_mode_valid, + .best_encoder = rcar_du_connector_best_encoder, +}; + +static void rcar_du_vga_connector_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +} + +static enum drm_connector_status +rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_unknown; +} + +static const struct drm_connector_funcs connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = rcar_du_vga_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = rcar_du_vga_connector_destroy, +}; + +int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, + struct rcar_du_encoder *renc) +{ + struct rcar_du_connector *rcon; + struct drm_connector *connector; + int ret; + + rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL); + if (rcon == NULL) + return -ENOMEM; + + connector = &rcon->connector; + connector->display_info.width_mm = 0; + connector->display_info.height_mm = 0; + + ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret < 0) + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); + ret = drm_sysfs_connector_add(connector); + if (ret < 0) + return ret; + + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_object_property_set_value(&connector->base, + rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); + + ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); + if (ret < 0) + return ret; + + connector->encoder = &renc->encoder; + rcon->encoder = renc; + + return 0; +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h new file mode 100644 index 000000000000..b12b0cf7f117 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h @@ -0,0 +1,23 @@ +/* + * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __RCAR_DU_VGACON_H__ +#define __RCAR_DU_VGACON_H__ + +struct rcar_du_device; +struct rcar_du_encoder; + +int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, + struct rcar_du_encoder *renc); + +#endif /* __RCAR_DU_VGACON_H__ */ -- cgit v1.2.3 From 6978f123776594b251d26dac9bcdf3ce8e9781c8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 15 Jun 2013 15:02:12 +0200 Subject: drm/rcar-du: Merge LVDS and VGA encoder code Create a single rcar_du_encoder structure that implements a KMS encoder. The current implementation is straightforward and only configures CRTC output routing. 
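The new rcar_du_encoder_init() entry point takes the encoder type, the DU output it drives and the platform encoder data, then creates the matching connector. A hypothetical caller sketch (the real call site in rcar_du_kms.c, listed in the diffstat, is not shown here, and the output indices are illustrative):

/* Illustrative only: register one LVDS panel and one VGA DAC. Only the
 * rcar_du_encoder_init() signature and the RCAR_DU_ENCODER_* types come
 * from the patch below.
 */
static int rcar_du_example_add_encoders(struct rcar_du_device *rcdu,
					const struct rcar_du_encoder_data *lvds,
					const struct rcar_du_encoder_data *vga)
{
	int ret;

	ret = rcar_du_encoder_init(rcdu, RCAR_DU_ENCODER_LVDS, 0, lvds);
	if (ret < 0)
		return ret;

	return rcar_du_encoder_init(rcdu, RCAR_DU_ENCODER_VGA, 1, vga);
}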
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Makefile | 3 +- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2 - drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 148 ++++++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 45 +++++++++ drivers/gpu/drm/rcar-du/rcar_du_kms.c | 49 ++-------- drivers/gpu/drm/rcar-du/rcar_du_kms.h | 29 +----- drivers/gpu/drm/rcar-du/rcar_du_lvds.c | 100 -------------------- drivers/gpu/drm/rcar-du/rcar_du_lvds.h | 24 ----- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 1 + drivers/gpu/drm/rcar-du/rcar_du_vga.c | 67 -------------- drivers/gpu/drm/rcar-du/rcar_du_vga.h | 24 ----- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 1 + 12 files changed, 205 insertions(+), 288 deletions(-) create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_encoder.c create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_encoder.h delete mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvds.c delete mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvds.h delete mode 100644 drivers/gpu/drm/rcar-du/rcar_du_vga.c delete mode 100644 drivers/gpu/drm/rcar-du/rcar_du_vga.h diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 45a8479aed0d..57b0fe1fa66e 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -1,10 +1,9 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_drv.o \ + rcar_du_encoder.o \ rcar_du_kms.o \ - rcar_du_lvds.o \ rcar_du_lvdscon.o \ rcar_du_plane.o \ - rcar_du_vga.o \ rcar_du_vgacon.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index aefc8a0cbcbc..03dd7018dde8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -23,10 +23,8 @@ #include "rcar_du_crtc.h" #include "rcar_du_drv.h" #include "rcar_du_kms.h" -#include "rcar_du_lvds.h" #include "rcar_du_plane.h" #include "rcar_du_regs.h" -#include "rcar_du_vga.h" #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c new file mode 100644 index 000000000000..15a56433c80c --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -0,0 +1,148 @@ +/* + * rcar_du_encoder.c -- R-Car Display Unit Encoder + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "rcar_du_drv.h" +#include "rcar_du_encoder.h" +#include "rcar_du_kms.h" +#include "rcar_du_lvdscon.h" +#include "rcar_du_vgacon.h" + +/* ----------------------------------------------------------------------------- + * Common connector functions + */ + +struct drm_encoder * +rcar_du_connector_best_encoder(struct drm_connector *connector) +{ + struct rcar_du_connector *rcon = to_rcar_connector(connector); + + return &rcon->encoder->encoder; +} + +/* ----------------------------------------------------------------------------- + * Encoder + */ + +static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_mode *panel_mode; + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + bool found = false; + + /* DAC encoders have currently no restriction on the mode. */ + if (encoder->encoder_type == DRM_MODE_ENCODER_DAC) + return true; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + found = true; + break; + } + } + + if (!found) { + dev_dbg(dev->dev, "mode_fixup: no connector found\n"); + return false; + } + + if (list_empty(&connector->modes)) { + dev_dbg(dev->dev, "mode_fixup: empty modes list\n"); + return false; + } + + panel_mode = list_first_entry(&connector->modes, + struct drm_display_mode, head); + + /* We're not allowed to modify the resolution. */ + if (mode->hdisplay != panel_mode->hdisplay || + mode->vdisplay != panel_mode->vdisplay) + return false; + + /* The flat panel mode is fixed, just copy it to the adjusted mode. 
*/ + drm_mode_copy(adjusted_mode, panel_mode); + + return true; +} + +static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder) +{ +} + +static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder) +{ +} + +static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + + rcar_du_crtc_route_output(encoder->crtc, renc->output); +} + +static const struct drm_encoder_helper_funcs encoder_helper_funcs = { + .dpms = rcar_du_encoder_dpms, + .mode_fixup = rcar_du_encoder_mode_fixup, + .prepare = rcar_du_encoder_mode_prepare, + .commit = rcar_du_encoder_mode_commit, + .mode_set = rcar_du_encoder_mode_set, +}; + +static const struct drm_encoder_funcs encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int rcar_du_encoder_init(struct rcar_du_device *rcdu, + enum rcar_du_encoder_type type, unsigned int output, + const struct rcar_du_encoder_data *data) +{ + struct rcar_du_encoder *renc; + int ret; + + renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); + if (renc == NULL) + return -ENOMEM; + + renc->output = output; + + ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs, + type); + if (ret < 0) + return ret; + + drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); + + switch (type) { + case RCAR_DU_ENCODER_LVDS: + return rcar_du_lvds_connector_init(rcdu, renc, + &data->u.lvds.panel); + + case RCAR_DU_ENCODER_VGA: + return rcar_du_vga_connector_init(rcdu, renc); + + default: + return -EINVAL; + } +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h new file mode 100644 index 000000000000..4f76e16bca88 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -0,0 +1,45 @@ +/* + * rcar_du_encoder.h -- R-Car Display Unit Encoder + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __RCAR_DU_ENCODER_H__ +#define __RCAR_DU_ENCODER_H__ + +#include + +struct rcar_du_device; +struct rcar_du_encoder_data; + +struct rcar_du_encoder { + struct drm_encoder encoder; + unsigned int output; +}; + +#define to_rcar_encoder(e) \ + container_of(e, struct rcar_du_encoder, encoder) + +struct rcar_du_connector { + struct drm_connector connector; + struct rcar_du_encoder *encoder; +}; + +#define to_rcar_connector(c) \ + container_of(c, struct rcar_du_connector, connector) + +struct drm_encoder * +rcar_du_connector_best_encoder(struct drm_connector *connector); + +int rcar_du_encoder_init(struct rcar_du_device *rcdu, + enum rcar_du_encoder_type type, unsigned int output, + const struct rcar_du_encoder_data *data); + +#endif /* __RCAR_DU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index d30c2e29bee2..3f8483cc0483 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -19,10 +19,9 @@ #include "rcar_du_crtc.h" #include "rcar_du_drv.h" +#include "rcar_du_encoder.h" #include "rcar_du_kms.h" -#include "rcar_du_lvds.h" #include "rcar_du_regs.h" -#include "rcar_du_vga.h" /* ----------------------------------------------------------------------------- * Format helpers @@ -105,35 +104,6 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) return NULL; } -/* ----------------------------------------------------------------------------- - * Common connector and encoder functions - */ - -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector) -{ - struct rcar_du_connector *rcon = to_rcar_connector(connector); - - return &rcon->encoder->encoder; -} - -void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder) -{ -} - -void rcar_du_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); - - rcar_du_crtc_route_output(encoder->crtc, renc->output); -} - -void rcar_du_encoder_mode_commit(struct drm_encoder *encoder) -{ -} - /* ----------------------------------------------------------------------------- * Frame buffer */ @@ -221,6 +191,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) const struct rcar_du_encoder_data *pdata = &rcdu->pdata->encoders[i]; + if (pdata->encoder == RCAR_DU_ENCODER_UNUSED) + continue; + if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { dev_warn(rcdu->dev, "encoder %u references unexisting output %u, skipping\n", @@ -228,18 +201,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) continue; } - switch (pdata->encoder) { - case RCAR_DU_ENCODER_VGA: - rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output); - break; - - case RCAR_DU_ENCODER_LVDS: - rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output); - break; - - default: - break; - } + rcar_du_encoder_init(rcdu, pdata->encoder, pdata->output, + pdata); } /* Set the possible CRTCs and possible clones. 
All encoders can be diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index dba472263486..5750e6af5655 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -16,8 +16,9 @@ #include -#include - +struct drm_file; +struct drm_device; +struct drm_mode_create_dumb; struct rcar_du_device; struct rcar_du_format_info { @@ -28,32 +29,8 @@ struct rcar_du_format_info { unsigned int edf; }; -struct rcar_du_encoder { - struct drm_encoder encoder; - unsigned int output; -}; - -#define to_rcar_encoder(e) \ - container_of(e, struct rcar_du_encoder, encoder) - -struct rcar_du_connector { - struct drm_connector connector; - struct rcar_du_encoder *encoder; -}; - -#define to_rcar_connector(c) \ - container_of(c, struct rcar_du_connector, connector) - const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc); -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector); -void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder); -void rcar_du_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -void rcar_du_encoder_mode_commit(struct drm_encoder *encoder); - int rcar_du_modeset_init(struct rcar_du_device *rcdu); int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c deleted file mode 100644 index 82e515741f89..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder - * - * Copyright (C) 2013 Renesas Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include - -#include "rcar_du_drv.h" -#include "rcar_du_kms.h" -#include "rcar_du_lvds.h" -#include "rcar_du_lvdscon.h" - -static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) -{ -} - -static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - const struct drm_display_mode *panel_mode; - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - bool found = false; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - found = true; - break; - } - } - - if (!found) { - dev_dbg(dev->dev, "mode_fixup: no connector found\n"); - return false; - } - - if (list_empty(&connector->modes)) { - dev_dbg(dev->dev, "mode_fixup: empty modes list\n"); - return false; - } - - panel_mode = list_first_entry(&connector->modes, - struct drm_display_mode, head); - - /* We're not allowed to modify the resolution. */ - if (mode->hdisplay != panel_mode->hdisplay || - mode->vdisplay != panel_mode->vdisplay) - return false; - - /* The flat panel mode is fixed, just copy it to the adjusted mode. 
*/ - drm_mode_copy(adjusted_mode, panel_mode); - - return true; -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = rcar_du_lvds_encoder_dpms, - .mode_fixup = rcar_du_lvds_encoder_mode_fixup, - .prepare = rcar_du_encoder_mode_prepare, - .commit = rcar_du_encoder_mode_commit, - .mode_set = rcar_du_encoder_mode_set, -}; - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -int rcar_du_lvds_init(struct rcar_du_device *rcdu, - const struct rcar_du_encoder_lvds_data *data, - unsigned int output) -{ - struct rcar_du_encoder *renc; - int ret; - - renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); - if (renc == NULL) - return -ENOMEM; - - renc->output = output; - - ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS); - if (ret < 0) - return ret; - - drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); - - return rcar_du_lvds_connector_init(rcdu, renc, &data->panel); -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvds.h deleted file mode 100644 index b47f8328e103..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector - * - * Copyright (C) 2013 Renesas Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __RCAR_DU_LVDS_H__ -#define __RCAR_DU_LVDS_H__ - -struct rcar_du_device; -struct rcar_du_encoder_lvds_data; - -int rcar_du_lvds_init(struct rcar_du_device *rcdu, - const struct rcar_du_encoder_lvds_data *data, - unsigned int output); - -#endif /* __RCAR_DU_LVDS_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 6cfcc9438c68..4f3ba93cd91d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -16,6 +16,7 @@ #include #include "rcar_du_drv.h" +#include "rcar_du_encoder.h" #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vga.c deleted file mode 100644 index 369ab32d5652..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c +++ /dev/null @@ -1,67 +0,0 @@ -/* - * rcar_du_vga.c -- R-Car Display Unit VGA DAC - * - * Copyright (C) 2013 Renesas Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include -#include -#include - -#include "rcar_du_drv.h" -#include "rcar_du_kms.h" -#include "rcar_du_vga.h" -#include "rcar_du_vgacon.h" - -static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode) -{ -} - -static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = rcar_du_vga_encoder_dpms, - .mode_fixup = rcar_du_vga_encoder_mode_fixup, - .prepare = rcar_du_encoder_mode_prepare, - .commit = rcar_du_encoder_mode_commit, - .mode_set = rcar_du_encoder_mode_set, -}; - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -int rcar_du_vga_init(struct rcar_du_device *rcdu, - const struct rcar_du_encoder_vga_data *data, - unsigned int output) -{ - struct rcar_du_encoder *renc; - int ret; - - renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); - if (renc == NULL) - return -ENOMEM; - - renc->output = output; - - ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs, - DRM_MODE_ENCODER_DAC); - if (ret < 0) - return ret; - - drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); - - return rcar_du_vga_connector_init(rcdu, renc); -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vga.h deleted file mode 100644 index b969b2075b57..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * rcar_du_vga.h -- R-Car Display Unit VGA DAC - * - * Copyright (C) 2013 Renesas Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __RCAR_DU_VGA_H__ -#define __RCAR_DU_VGA_H__ - -struct rcar_du_device; -struct rcar_du_encoder_vga_data; - -int rcar_du_vga_init(struct rcar_du_device *rcdu, - const struct rcar_du_encoder_vga_data *data, - unsigned int output); - -#endif /* __RCAR_DU_VGA_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 2ee320333615..36105db9bda1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -16,6 +16,7 @@ #include #include "rcar_du_drv.h" +#include "rcar_du_encoder.h" #include "rcar_du_kms.h" #include "rcar_du_vgacon.h" -- cgit v1.2.3 From 9194731c5f9b2664c882a515b3398a29384a6864 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 16:25:35 +0200 Subject: drm/rcar-du: Rename platform data fields to match what they describe The struct rcar_du_encoder_data encoder::field describes the encoder type, and the rcar_du_encoder_lvds_data and rcar_du_encoder_vga_data structures describe connector properties. Rename them accordingly. 
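As a usage sketch only, a hypothetical board file filled in with the renamed fields could look like this; the array contents, output numbers and the exact rcar_du_platform_data layout are assumptions, and only the .type/.output/.connector naming is taken from this patch:

/* Hypothetical board description using the renamed platform data fields. */
static struct rcar_du_encoder_data board_du_encoders[] = {
        {
                .type = RCAR_DU_ENCODER_VGA,
                .output = 0,
        },
        {
                .type = RCAR_DU_ENCODER_LVDS,
                .output = 1,
                /* .connector.lvds.panel would carry the fixed panel mode. */
        },
};

static struct rcar_du_platform_data board_du_pdata = {
        .encoders       = board_du_encoders,
        .num_encoders   = ARRAY_SIZE(board_du_encoders),
};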
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 3 ++- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 5 ++--- include/linux/platform_data/rcar-du.h | 19 +++++++++++++------ 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 15a56433c80c..0d0375c7ee44 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -137,7 +137,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, switch (type) { case RCAR_DU_ENCODER_LVDS: return rcar_du_lvds_connector_init(rcdu, renc, - &data->u.lvds.panel); + &data->connector.lvds.panel); case RCAR_DU_ENCODER_VGA: return rcar_du_vga_connector_init(rcdu, renc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 4f76e16bca88..08cde1293892 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -14,10 +14,11 @@ #ifndef __RCAR_DU_ENCODER_H__ #define __RCAR_DU_ENCODER_H__ +#include + #include struct rcar_du_device; -struct rcar_du_encoder_data; struct rcar_du_encoder { struct drm_encoder encoder; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 3f8483cc0483..a8eef167d51a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -191,7 +191,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) const struct rcar_du_encoder_data *pdata = &rcdu->pdata->encoders[i]; - if (pdata->encoder == RCAR_DU_ENCODER_UNUSED) + if (pdata->type == RCAR_DU_ENCODER_UNUSED) continue; if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { @@ -201,8 +201,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) continue; } - rcar_du_encoder_init(rcdu, pdata->encoder, pdata->output, - pdata); + rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata); } /* Set the possible CRTCs and possible clones. All encoders can be diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h index 80587fdbba3e..64cd8635e6e6 100644 --- a/include/linux/platform_data/rcar-du.h +++ b/include/linux/platform_data/rcar-du.h @@ -28,22 +28,29 @@ struct rcar_du_panel_data { struct drm_mode_modeinfo mode; }; -struct rcar_du_encoder_lvds_data { +struct rcar_du_connector_lvds_data { struct rcar_du_panel_data panel; }; -struct rcar_du_encoder_vga_data { +struct rcar_du_connector_vga_data { /* TODO: Add DDC information for EDID retrieval */ }; +/* + * struct rcar_du_encoder_data - Encoder platform data + * @type: the encoder type (RCAR_DU_ENCODER_*) + * @output: the DU output the connector is connected to + * @connector.lvds: platform data for LVDS connectors + * @connector.vga: platform data for VGA connectors + */ struct rcar_du_encoder_data { - enum rcar_du_encoder_type encoder; + enum rcar_du_encoder_type type; unsigned int output; union { - struct rcar_du_encoder_lvds_data lvds; - struct rcar_du_encoder_vga_data vga; - } u; + struct rcar_du_connector_lvds_data lvds; + struct rcar_du_connector_vga_data vga; + } connector; }; struct rcar_du_platform_data { -- cgit v1.2.3 From ae425b6a77a1118b1b4f594efe4aaa4243bf222b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 21:02:49 +0200 Subject: drm/rcar-du: Create rcar_du_planes structure Move the plane-related fields of struct rcar_du_device to their own structure. 
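A short sketch, assuming a valid device pointer and a hypothetical helper name, of why this is a low-risk move: the aggregate keeps the "planes" name, so existing accesses to the per-device plane state compile unchanged:

/* Sketch only: plane bookkeeping accessed through the new aggregate. */
static void example_mark_plane_used(struct rcar_du_device *rcdu,
                                    unsigned int hwindex)
{
        mutex_lock(&rcdu->planes.lock);
        rcdu->planes.free &= ~(1 << hwindex);
        mutex_unlock(&rcdu->planes.lock);
}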
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 11 +---------- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 7d2320fb948d..0305c21d71f3 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -15,7 +15,6 @@ #define __RCAR_DU_DRV_H__ #include -#include #include #include "rcar_du_crtc.h" @@ -49,15 +48,7 @@ struct rcar_du_device { unsigned int used_crtcs; unsigned int num_crtcs; - struct { - struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES]; - unsigned int free; - struct mutex lock; - - struct drm_property *alpha; - struct drm_property *colorkey; - struct drm_property *zpos; - } planes; + struct rcar_du_planes planes; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 5397dba2fe57..5c8488ca019f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -14,8 +14,11 @@ #ifndef __RCAR_DU_PLANE_H__ #define __RCAR_DU_PLANE_H__ -struct drm_crtc; -struct drm_framebuffer; +#include + +#include +#include + struct rcar_du_device; struct rcar_du_format_info; @@ -54,6 +57,16 @@ struct rcar_du_plane { unsigned int dst_y; }; +struct rcar_du_planes { + struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES]; + unsigned int free; + struct mutex lock; + + struct drm_property *alpha; + struct drm_property *colorkey; + struct drm_property *zpos; +}; + int rcar_du_plane_init(struct rcar_du_device *rcdu); int rcar_du_plane_register(struct rcar_du_device *rcdu); void rcar_du_plane_setup(struct rcar_du_plane *plane); -- cgit v1.2.3 From 7fe99fda5f5c52a01b2c966aa68341a0b3d8ab33 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 19:18:31 +0200 Subject: drm/rcar-du: Rename rcar_du_plane_(init|register) to rcar_du_planes_* The functions initialize or register all planes, rename them accordingly. 
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 4 ++-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 4 ++-- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index a8eef167d51a..a1343fbde57a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -174,7 +174,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rcdu->ddev->mode_config.max_height = 2047; rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; - ret = rcar_du_plane_init(rcdu); + ret = rcar_du_planes_init(rcdu); if (ret < 0) return ret; @@ -215,7 +215,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = 1 << 0; } - ret = rcar_du_plane_register(rcdu); + ret = rcar_du_planes_register(rcdu); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 38ebd20e4e8d..29f21477ef0e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -435,7 +435,7 @@ static const uint32_t formats[] = { DRM_FORMAT_NV16, }; -int rcar_du_plane_init(struct rcar_du_device *rcdu) +int rcar_du_planes_init(struct rcar_du_device *rcdu) { unsigned int i; @@ -475,7 +475,7 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu) return 0; } -int rcar_du_plane_register(struct rcar_du_device *rcdu) +int rcar_du_planes_register(struct rcar_du_device *rcdu) { unsigned int i; int ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 5c8488ca019f..bcf6f76f56a0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -67,8 +67,9 @@ struct rcar_du_planes { struct drm_property *zpos; }; -int rcar_du_plane_init(struct rcar_du_device *rcdu); -int rcar_du_plane_register(struct rcar_du_device *rcdu); +int rcar_du_planes_init(struct rcar_du_device *rcdu); +int rcar_du_planes_register(struct rcar_du_device *rcdu); + void rcar_du_plane_setup(struct rcar_du_plane *plane); void rcar_du_plane_update_base(struct rcar_du_plane *plane); void rcar_du_plane_compute_base(struct rcar_du_plane *plane, -- cgit v1.2.3 From cb2025d2509ffab1c426509fd9de3d83e40398b9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 21:01:02 +0200 Subject: drm/rcar-du: Introduce CRTCs groups The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending unit, timings generator, ...) and device-global resources (start/stop control, planes, ...) shared between the two CRTCs. The R8A7790 introduced a third CRTC with its own set of global resources This would be modeled as two separate DU device instances if it wasn't for a handful or resources that are shared between the three CRTCs (mostly related to input and output routing). For this reason the R8A7790 DU must be modeled as a single device with three CRTCs, two sets of "semi-global" resources, and a few device-global resources. Introduce a new rcar_du_group driver-specific object, without any real counterpart in the DU documentation, that models those semi-global resources. 
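For illustration, a minimal sketch (not part of this patch) of the resulting ownership chain: a CRTC reaches the shared state through its group, and a group register access is simply a device access at the group's offset. The function name is hypothetical; the fields and helpers it touches are the ones added below:

/* Sketch only: how per-group ("semi-global") resources are reached. */
static void example_group_access(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
        struct rcar_du_group *rgrp = rcrtc->group;
        struct rcar_du_device *rcdu = rgrp->dev;

        /* Semi-global register (DSYSR, DEFR, DORCR, ...) of this group. */
        rcar_du_write(rcdu, rgrp->mmio_offset + reg, data);

        /* Plane bookkeeping is now per group rather than per device. */
        mutex_lock(&rgrp->planes.lock);
        /* ... pick a hardware plane from rgrp->planes.free ... */
        mutex_unlock(&rgrp->planes.lock);
}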
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Makefile | 1 + drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 92 ++++++------------- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 5 +- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 46 ---------- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 10 +-- drivers/gpu/drm/rcar-du/rcar_du_group.c | 127 ++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_group.h | 47 ++++++++++ drivers/gpu/drm/rcar-du/rcar_du_kms.c | 11 ++- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 155 ++++++++++++++++---------------- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 8 +- 10 files changed, 299 insertions(+), 203 deletions(-) create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_group.c create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_group.h diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 57b0fe1fa66e..b9b5e666fbba 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -1,6 +1,7 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_drv.o \ rcar_du_encoder.o \ + rcar_du_group.o \ rcar_du_kms.o \ rcar_du_lvdscon.o \ rcar_du_plane.o \ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 03dd7018dde8..7784a3ba7854 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -30,21 +30,21 @@ static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; return rcar_du_read(rcdu, rcrtc->mmio_offset + reg); } static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data); } static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr); @@ -52,7 +52,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); @@ -61,7 +61,7 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr, u32 set) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg); rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); @@ -69,14 +69,13 @@ static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; int ret; ret = clk_prepare_enable(rcrtc->clock); if (ret < 0) return ret; - ret = rcar_du_get(rcdu); + ret = rcar_du_group_get(rcrtc->group); if (ret < 0) clk_disable_unprepare(rcrtc->clock); @@ -85,17 +84,14 @@ static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; - - 
rcar_du_put(rcdu); + rcar_du_group_put(rcrtc->group); clk_disable_unprepare(rcrtc->clock); } static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { - struct drm_crtc *crtc = &rcrtc->crtc; - struct rcar_du_device *rcdu = crtc->dev->dev_private; - const struct drm_display_mode *mode = &crtc->mode; + const struct drm_display_mode *mode = &rcrtc->crtc.mode; + struct rcar_du_device *rcdu = rcrtc->group->dev; unsigned long clk; u32 value; u32 div; @@ -136,7 +132,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; + struct rcar_du_device *rcdu = rcrtc->group->dev; u32 dorcr = rcar_du_read(rcdu, DORCR); dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); @@ -153,36 +149,6 @@ static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) rcar_du_write(rcdu, DORCR, dorcr); } -static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start) -{ - rcar_du_write(rcdu, DSYSR, - (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) | - (start ? DSYSR_DEN : DSYSR_DRES)); -} - -static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start) -{ - /* Many of the configuration bits are only updated when the display - * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some - * of those bits could be pre-configured, but others (especially the - * bits related to plane assignment to display timing controllers) need - * to be modified at runtime. - * - * Restart the display controller if a start is requested. Sorry for the - * flicker. It should be possible to move most of the "DRES-update" bits - * setup to driver initialization time and minimize the number of cases - * when the display controller will have to be restarted. 
- */ - if (start) { - if (rcdu->used_crtcs++ != 0) - __rcar_du_start_stop(rcdu, false); - __rcar_du_start_stop(rcdu, true); - } else { - if (--rcdu->used_crtcs == 0) - __rcar_du_start_stop(rcdu, false); - } -} - void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -195,8 +161,8 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output) void rcar_du_crtc_update_planes(struct drm_crtc *crtc) { - struct rcar_du_device *rcdu = crtc->dev->dev_private; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct rcar_du_device *rcdu = rcrtc->group->dev; struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; unsigned int num_planes = 0; unsigned int prio = 0; @@ -204,8 +170,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) u32 dptsr = 0; u32 dspr = 0; - for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { - struct rcar_du_plane *plane = &rcdu->planes.planes[i]; + for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { + struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; unsigned int j; if (plane->crtc != &rcrtc->crtc || !plane->enabled) @@ -254,10 +220,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) */ if (value != dptsr) { rcar_du_write(rcdu, DPTSR, dptsr); - if (rcdu->used_crtcs) { - __rcar_du_start_stop(rcdu, false); - __rcar_du_start_stop(rcdu, true); - } + if (rcrtc->group->used_crtcs) + rcar_du_group_restart(rcrtc->group); } } @@ -267,7 +231,6 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) { struct drm_crtc *crtc = &rcrtc->crtc; - struct rcar_du_device *rcdu = crtc->dev->dev_private; unsigned int i; if (rcrtc->started) @@ -284,14 +247,14 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) rcar_du_crtc_set_display_timing(rcrtc); rcar_du_crtc_set_routing(rcrtc); - mutex_lock(&rcdu->planes.lock); + mutex_lock(&rcrtc->group->planes.lock); rcrtc->plane->enabled = true; rcar_du_crtc_update_planes(crtc); - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&rcrtc->group->planes.lock); /* Setup planes. */ - for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { - struct rcar_du_plane *plane = &rcdu->planes.planes[i]; + for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { + struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; if (plane->crtc != crtc || !plane->enabled) continue; @@ -305,7 +268,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) */ rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER); - rcar_du_start_stop(rcdu, true); + rcar_du_group_start_stop(rcrtc->group, true); rcrtc->started = true; } @@ -313,22 +276,21 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) { struct drm_crtc *crtc = &rcrtc->crtc; - struct rcar_du_device *rcdu = crtc->dev->dev_private; if (!rcrtc->started) return; - mutex_lock(&rcdu->planes.lock); + mutex_lock(&rcrtc->group->planes.lock); rcrtc->plane->enabled = false; rcar_du_crtc_update_planes(crtc); - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&rcrtc->group->planes.lock); /* Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. 
*/ rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); - rcar_du_start_stop(rcdu, false); + rcar_du_group_start_stop(rcrtc->group, false); rcrtc->started = false; } @@ -406,8 +368,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { - struct rcar_du_device *rcdu = crtc->dev->dev_private; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct rcar_du_device *rcdu = rcrtc->group->dev; const struct rcar_du_format_info *format; int ret; @@ -583,8 +545,9 @@ static const struct drm_crtc_funcs crtc_funcs = { .page_flip = rcar_du_crtc_page_flip, }; -int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) +int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) { + struct rcar_du_device *rcdu = rgrp->dev; struct platform_device *pdev = to_platform_device(rcdu->dev); struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; struct drm_crtc *crtc = &rcrtc->crtc; @@ -608,10 +571,11 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) return PTR_ERR(rcrtc->clock); } + rcrtc->group = rgrp; rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0; rcrtc->index = index; rcrtc->dpms = DRM_MODE_DPMS_OFF; - rcrtc->plane = &rcdu->planes.planes[index]; + rcrtc->plane = &rgrp->planes.planes[index]; rcrtc->plane->crtc = crtc; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 5b69e98a3b92..542a7feceb20 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -19,7 +19,7 @@ #include #include -struct rcar_du_device; +struct rcar_du_group; struct rcar_du_plane; struct rcar_du_crtc { @@ -34,10 +34,11 @@ struct rcar_du_crtc { unsigned int outputs; int dpms; + struct rcar_du_group *group; struct rcar_du_plane *plane; }; -int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index); +int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, struct drm_file *file); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 5f82e046f83e..cdc363b6cd37 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -28,52 +28,6 @@ #include "rcar_du_kms.h" #include "rcar_du_regs.h" -/* ----------------------------------------------------------------------------- - * Core device operations - */ - -/* - * rcar_du_get - Acquire a reference to the DU - * - * Acquiring the first reference setups core registers. A reference must be - * held before accessing any hardware registers. - * - * This function must be called with the DRM mode_config lock held. - * - * Return 0 in case of success or a negative error code otherwise. - */ -int rcar_du_get(struct rcar_du_device *rcdu) -{ - if (rcdu->use_count) - goto done; - - /* Enable extended features */ - rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE); - rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G); - rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3); - rcar_du_write(rcdu, DEFR4, DEFR4_CODE); - rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5); - - /* Use DS1PR and DS2PR to configure planes priorities and connects the - * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. 
- */ - rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS); - -done: - rcdu->use_count++; - return 0; -} - -/* - * rcar_du_put - Release a reference to the DU - * - * This function must be called with the DRM mode_config lock held. - */ -void rcar_du_put(struct rcar_du_device *rcdu) -{ - --rcdu->use_count; -} - /* ----------------------------------------------------------------------------- * DRM operations */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 0305c21d71f3..5b57a2f9b52a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -18,11 +18,12 @@ #include #include "rcar_du_crtc.h" -#include "rcar_du_plane.h" +#include "rcar_du_group.h" struct clk; struct device; struct drm_device; +struct rcar_du_device; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ @@ -40,15 +41,13 @@ struct rcar_du_device { const struct rcar_du_device_info *info; void __iomem *mmio; - unsigned int use_count; struct drm_device *ddev; struct rcar_du_crtc crtcs[2]; - unsigned int used_crtcs; unsigned int num_crtcs; - struct rcar_du_planes planes; + struct rcar_du_group group; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, @@ -57,9 +56,6 @@ static inline bool rcar_du_has(struct rcar_du_device *rcdu, return rcdu->info->features & feature; } -int rcar_du_get(struct rcar_du_device *rcdu); -void rcar_du_put(struct rcar_du_device *rcdu); - static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) { return ioread32(rcdu->mmio + reg); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c new file mode 100644 index 000000000000..625b9f446965 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -0,0 +1,127 @@ +/* + * rcar_du_group.c -- R-Car Display Unit Channels Pair + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending + * unit, timings generator, ...) and device-global resources (start/stop + * control, planes, ...) shared between the two CRTCs. + * + * The R8A7790 introduced a third CRTC with its own set of global resources. + * This would be modeled as two separate DU device instances if it wasn't for + * a handful or resources that are shared between the three CRTCs (mostly + * related to input and output routing). For this reason the R8A7790 DU must be + * modeled as a single device with three CRTCs, two sets of "semi-global" + * resources, and a few device-global resources. + * + * The rcar_du_group object is a driver specific object, without any real + * counterpart in the DU documentation, that models those semi-global resources. 
+ */ + +#include + +#include "rcar_du_drv.h" +#include "rcar_du_group.h" +#include "rcar_du_regs.h" + +static u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg) +{ + return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg); +} + +static void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data) +{ + rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data); +} + +static void rcar_du_group_setup(struct rcar_du_group *rgrp) +{ + /* Enable extended features */ + rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE); + rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G); + rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3); + rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE); + rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5); + + /* Use DS1PR and DS2PR to configure planes priorities and connects the + * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. + */ + rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS); +} + +/* + * rcar_du_group_get - Acquire a reference to the DU channels group + * + * Acquiring the first reference setups core registers. A reference must be held + * before accessing any hardware registers. + * + * This function must be called with the DRM mode_config lock held. + * + * Return 0 in case of success or a negative error code otherwise. + */ +int rcar_du_group_get(struct rcar_du_group *rgrp) +{ + if (rgrp->use_count) + goto done; + + rcar_du_group_setup(rgrp); + +done: + rgrp->use_count++; + return 0; +} + +/* + * rcar_du_group_put - Release a reference to the DU + * + * This function must be called with the DRM mode_config lock held. + */ +void rcar_du_group_put(struct rcar_du_group *rgrp) +{ + --rgrp->use_count; +} + +static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) +{ + rcar_du_group_write(rgrp, DSYSR, + (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) | + (start ? DSYSR_DEN : DSYSR_DRES)); +} + +void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) +{ + /* Many of the configuration bits are only updated when the display + * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some + * of those bits could be pre-configured, but others (especially the + * bits related to plane assignment to display timing controllers) need + * to be modified at runtime. + * + * Restart the display controller if a start is requested. Sorry for the + * flicker. It should be possible to move most of the "DRES-update" bits + * setup to driver initialization time and minimize the number of cases + * when the display controller will have to be restarted. 
+ */ + if (start) { + if (rgrp->used_crtcs++ != 0) + __rcar_du_group_start_stop(rgrp, false); + __rcar_du_group_start_stop(rgrp, true); + } else { + if (--rgrp->used_crtcs == 0) + __rcar_du_group_start_stop(rgrp, false); + } +} + +void rcar_du_group_restart(struct rcar_du_group *rgrp) +{ + __rcar_du_group_start_stop(rgrp, false); + __rcar_du_group_start_stop(rgrp, true); +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h new file mode 100644 index 000000000000..748331bbb8fe --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -0,0 +1,47 @@ +/* + * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __RCAR_DU_GROUP_H__ +#define __RCAR_DU_GROUP_H__ + +#include "rcar_du_plane.h" + +struct rcar_du_device; + +/* + * struct rcar_du_group - CRTCs and planes group + * @dev: the DU device + * @mmio_offset: registers offset in the device memory map + * @index: group index + * @use_count: number of users of the group (rcar_du_group_(get|put)) + * @used_crtcs: number of CRTCs currently in use + * @planes: planes handled by the group + */ +struct rcar_du_group { + struct rcar_du_device *dev; + unsigned int mmio_offset; + unsigned int index; + + unsigned int use_count; + unsigned int used_crtcs; + + struct rcar_du_planes planes; +}; + +int rcar_du_group_get(struct rcar_du_group *rgrp); +void rcar_du_group_put(struct rcar_du_group *rgrp); +void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); +void rcar_du_group_restart(struct rcar_du_group *rgrp); + + +#endif /* __RCAR_DU_GROUP_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index a1343fbde57a..c32e0f9d4823 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -174,17 +174,20 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rcdu->ddev->mode_config.max_height = 2047; rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; - ret = rcar_du_planes_init(rcdu); + rcdu->group.dev = rcdu; + rcdu->group.index = 0; + rcdu->group.used_crtcs = 0; + + ret = rcar_du_planes_init(&rcdu->group); if (ret < 0) return ret; for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) { - ret = rcar_du_crtc_create(rcdu, i); + ret = rcar_du_crtc_create(&rcdu->group, i); if (ret < 0) return ret; } - rcdu->used_crtcs = 0; rcdu->num_crtcs = i; for (i = 0; i < rcdu->pdata->num_encoders; ++i) { @@ -215,7 +218,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = 1 << 0; } - ret = rcar_du_planes_register(rcdu); + ret = rcar_du_planes_register(&rcdu->group); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 29f21477ef0e..1e9cf7c92f8e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -36,71 +36,73 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane) return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane; } -static u32 rcar_du_plane_read(struct rcar_du_device *rcdu, +static u32 rcar_du_plane_read(struct rcar_du_group *rgrp, unsigned 
int index, u32 reg) { - return rcar_du_read(rcdu, index * PLANE_OFF + reg); + return rcar_du_read(rgrp->dev, + rgrp->mmio_offset + index * PLANE_OFF + reg); } -static void rcar_du_plane_write(struct rcar_du_device *rcdu, +static void rcar_du_plane_write(struct rcar_du_group *rgrp, unsigned int index, u32 reg, u32 data) { - rcar_du_write(rcdu, index * PLANE_OFF + reg, data); + rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg, + data); } int rcar_du_plane_reserve(struct rcar_du_plane *plane, const struct rcar_du_format_info *format) { - struct rcar_du_device *rcdu = plane->dev; + struct rcar_du_group *rgrp = plane->group; unsigned int i; int ret = -EBUSY; - mutex_lock(&rcdu->planes.lock); + mutex_lock(&rgrp->planes.lock); - for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { - if (!(rcdu->planes.free & (1 << i))) + for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) { + if (!(rgrp->planes.free & (1 << i))) continue; if (format->planes == 1 || - rcdu->planes.free & (1 << ((i + 1) % 8))) + rgrp->planes.free & (1 << ((i + 1) % 8))) break; } - if (i == ARRAY_SIZE(rcdu->planes.planes)) + if (i == ARRAY_SIZE(rgrp->planes.planes)) goto done; - rcdu->planes.free &= ~(1 << i); + rgrp->planes.free &= ~(1 << i); if (format->planes == 2) - rcdu->planes.free &= ~(1 << ((i + 1) % 8)); + rgrp->planes.free &= ~(1 << ((i + 1) % 8)); plane->hwindex = i; ret = 0; done: - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&rgrp->planes.lock); return ret; } void rcar_du_plane_release(struct rcar_du_plane *plane) { - struct rcar_du_device *rcdu = plane->dev; + struct rcar_du_group *rgrp = plane->group; if (plane->hwindex == -1) return; - mutex_lock(&rcdu->planes.lock); - rcdu->planes.free |= 1 << plane->hwindex; + mutex_lock(&rgrp->planes.lock); + rgrp->planes.free |= 1 << plane->hwindex; if (plane->format->planes == 2) - rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8); - mutex_unlock(&rcdu->planes.lock); + rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8); + mutex_unlock(&rgrp->planes.lock); plane->hwindex = -1; } void rcar_du_plane_update_base(struct rcar_du_plane *plane) { - struct rcar_du_device *rcdu = plane->dev; + struct rcar_du_group *rgrp = plane->group; unsigned int index = plane->hwindex; /* The Y position is expressed in raster line units and must be doubled @@ -111,18 +113,18 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane) * Similarly, for the second plane, NV12 and NV21 formats seem to * require a halved Y position value. */ - rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); - rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * + rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); + rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * (plane->format->bpp == 32 ? 2 : 1)); - rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]); + rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]); if (plane->format->planes == 2) { index = (index + 1) % 8; - rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); - rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * + rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); + rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * (plane->format->bpp == 16 ? 
2 : 1) / 2); - rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]); + rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]); } } @@ -143,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane, static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, unsigned int index) { - struct rcar_du_device *rcdu = plane->dev; + struct rcar_du_group *rgrp = plane->group; u32 colorkey; u32 pnmr; @@ -157,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, * enable alpha-blending regardless of the X bit value. */ if (plane->format->fourcc != DRM_FORMAT_XRGB1555) - rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0); + rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0); else - rcar_du_plane_write(rcdu, index, PnALPHAR, + rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_X | plane->alpha); pnmr = PnMR_BM_MD | plane->format->pnmr; @@ -175,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, if (plane->format->fourcc == DRM_FORMAT_YUYV) pnmr |= PnMR_YCDF_YUYV; - rcar_du_plane_write(rcdu, index, PnMR, pnmr); + rcar_du_plane_write(rgrp, index, PnMR, pnmr); switch (plane->format->fourcc) { case DRM_FORMAT_RGB565: colorkey = ((plane->colorkey & 0xf80000) >> 8) | ((plane->colorkey & 0x00fc00) >> 5) | ((plane->colorkey & 0x0000f8) >> 3); - rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); + rcar_du_plane_write(rgrp, index, PnTC2R, colorkey); break; case DRM_FORMAT_ARGB1555: @@ -190,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, colorkey = ((plane->colorkey & 0xf80000) >> 9) | ((plane->colorkey & 0x00f800) >> 6) | ((plane->colorkey & 0x0000f8) >> 3); - rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); + rcar_du_plane_write(rgrp, index, PnTC2R, colorkey); break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: - rcar_du_plane_write(rcdu, index, PnTC3R, + rcar_du_plane_write(rgrp, index, PnTC3R, PnTC3R_CODE | (plane->colorkey & 0xffffff)); break; } @@ -204,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, static void __rcar_du_plane_setup(struct rcar_du_plane *plane, unsigned int index) { - struct rcar_du_device *rcdu = plane->dev; + struct rcar_du_group *rgrp = plane->group; u32 ddcr2 = PnDDCR2_CODE; u32 ddcr4; u32 mwr; @@ -214,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, * The data format is selected by the DDDF field in PnMR and the EDF * field in DDCR4. 
*/ - ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4); + ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4); ddcr4 &= ~PnDDCR4_EDF_MASK; ddcr4 |= plane->format->edf | PnDDCR4_CODE; @@ -235,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, } } - rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2); - rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4); + rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2); + rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); /* Memory pitch (expressed in pixels) */ if (plane->format->planes == 2) @@ -244,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, else mwr = plane->pitch * 8 / plane->format->bpp; - rcar_du_plane_write(rcdu, index, PnMWR, mwr); + rcar_du_plane_write(rgrp, index, PnMWR, mwr); /* Destination position and size */ - rcar_du_plane_write(rcdu, index, PnDSXR, plane->width); - rcar_du_plane_write(rcdu, index, PnDSYR, plane->height); - rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x); - rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y); + rcar_du_plane_write(rgrp, index, PnDSXR, plane->width); + rcar_du_plane_write(rgrp, index, PnDSYR, plane->height); + rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x); + rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y); /* Wrap-around and blinking, disabled */ - rcar_du_plane_write(rcdu, index, PnWASPR, 0); - rcar_du_plane_write(rcdu, index, PnWAMWR, 4095); - rcar_du_plane_write(rcdu, index, PnBTR, 0); - rcar_du_plane_write(rcdu, index, PnMLR, 0); + rcar_du_plane_write(rgrp, index, PnWASPR, 0); + rcar_du_plane_write(rgrp, index, PnWAMWR, 4095); + rcar_du_plane_write(rgrp, index, PnBTR, 0); + rcar_du_plane_write(rgrp, index, PnMLR, 0); } void rcar_du_plane_setup(struct rcar_du_plane *plane) @@ -276,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_w, uint32_t src_h) { struct rcar_du_plane *rplane = to_rcar_plane(plane); - struct rcar_du_device *rcdu = plane->dev->dev_private; + struct rcar_du_device *rcdu = rplane->group->dev; const struct rcar_du_format_info *format; unsigned int nplanes; int ret; @@ -319,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, rcar_du_plane_compute_base(rplane, fb); rcar_du_plane_setup(rplane); - mutex_lock(&rcdu->planes.lock); + mutex_lock(&rplane->group->planes.lock); rplane->enabled = true; rcar_du_crtc_update_planes(rplane->crtc); - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&rplane->group->planes.lock); return 0; } static int rcar_du_plane_disable(struct drm_plane *plane) { - struct rcar_du_device *rcdu = plane->dev->dev_private; struct rcar_du_plane *rplane = to_rcar_plane(plane); if (!rplane->enabled) return 0; - mutex_lock(&rcdu->planes.lock); + mutex_lock(&rplane->group->planes.lock); rplane->enabled = false; rcar_du_crtc_update_planes(rplane->crtc); - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&rplane->group->planes.lock); rcar_du_plane_release(rplane); @@ -380,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane, static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, unsigned int zpos) { - struct rcar_du_device *rcdu = plane->dev; - - mutex_lock(&rcdu->planes.lock); + mutex_lock(&plane->group->planes.lock); if (plane->zpos == zpos) goto done; @@ -393,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, rcar_du_crtc_update_planes(plane->crtc); done: - mutex_unlock(&rcdu->planes.lock); + mutex_unlock(&plane->group->planes.lock); } static int 
rcar_du_plane_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t value) { - struct rcar_du_device *rcdu = plane->dev->dev_private; struct rcar_du_plane *rplane = to_rcar_plane(plane); + struct rcar_du_group *rgrp = rplane->group; - if (property == rcdu->planes.alpha) + if (property == rgrp->planes.alpha) rcar_du_plane_set_alpha(rplane, value); - else if (property == rcdu->planes.colorkey) + else if (property == rgrp->planes.colorkey) rcar_du_plane_set_colorkey(rplane, value); - else if (property == rcdu->planes.zpos) + else if (property == rgrp->planes.zpos) rcar_du_plane_set_zpos(rplane, value); else return -EINVAL; @@ -435,37 +434,39 @@ static const uint32_t formats[] = { DRM_FORMAT_NV16, }; -int rcar_du_planes_init(struct rcar_du_device *rcdu) +int rcar_du_planes_init(struct rcar_du_group *rgrp) { + struct rcar_du_planes *planes = &rgrp->planes; + struct rcar_du_device *rcdu = rgrp->dev; unsigned int i; - mutex_init(&rcdu->planes.lock); - rcdu->planes.free = 0xff; + mutex_init(&planes->lock); + planes->free = 0xff; - rcdu->planes.alpha = + planes->alpha = drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255); - if (rcdu->planes.alpha == NULL) + if (planes->alpha == NULL) return -ENOMEM; /* The color key is expressed as an RGB888 triplet stored in a 32-bit * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) * or enable source color keying (1). */ - rcdu->planes.colorkey = + planes->colorkey = drm_property_create_range(rcdu->ddev, 0, "colorkey", 0, 0x01ffffff); - if (rcdu->planes.colorkey == NULL) + if (planes->colorkey == NULL) return -ENOMEM; - rcdu->planes.zpos = + planes->zpos = drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7); - if (rcdu->planes.zpos == NULL) + if (planes->zpos == NULL) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { - struct rcar_du_plane *plane = &rcdu->planes.planes[i]; + for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) { + struct rcar_du_plane *plane = &planes->planes[i]; - plane->dev = rcdu; + plane->group = rgrp; plane->hwindex = -1; plane->alpha = 255; plane->colorkey = RCAR_DU_COLORKEY_NONE; @@ -475,8 +476,10 @@ int rcar_du_planes_init(struct rcar_du_device *rcdu) return 0; } -int rcar_du_planes_register(struct rcar_du_device *rcdu) +int rcar_du_planes_register(struct rcar_du_group *rgrp) { + struct rcar_du_planes *planes = &rgrp->planes; + struct rcar_du_device *rcdu = rgrp->dev; unsigned int i; int ret; @@ -487,7 +490,7 @@ int rcar_du_planes_register(struct rcar_du_device *rcdu) if (plane == NULL) return -ENOMEM; - plane->hwplane = &rcdu->planes.planes[i + 2]; + plane->hwplane = &planes->planes[i + 2]; plane->hwplane->zpos = 1; ret = drm_plane_init(rcdu->ddev, &plane->plane, @@ -498,12 +501,12 @@ int rcar_du_planes_register(struct rcar_du_device *rcdu) return ret; drm_object_attach_property(&plane->plane.base, - rcdu->planes.alpha, 255); + planes->alpha, 255); drm_object_attach_property(&plane->plane.base, - rcdu->planes.colorkey, + planes->colorkey, RCAR_DU_COLORKEY_NONE); drm_object_attach_property(&plane->plane.base, - rcdu->planes.zpos, 1); + planes->zpos, 1); } return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index bcf6f76f56a0..f94f9ce84998 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -19,8 +19,8 @@ #include #include -struct rcar_du_device; struct rcar_du_format_info; +struct rcar_du_group; /* The RCAR DU has 8 hardware planes, shared between KMS planes and 
CRTCs. As * using KMS planes requires at least one of the CRTCs being enabled, no more @@ -33,7 +33,7 @@ struct rcar_du_format_info; #define RCAR_DU_NUM_SW_PLANES 9 struct rcar_du_plane { - struct rcar_du_device *dev; + struct rcar_du_group *group; struct drm_crtc *crtc; bool enabled; @@ -67,8 +67,8 @@ struct rcar_du_planes { struct drm_property *zpos; }; -int rcar_du_planes_init(struct rcar_du_device *rcdu); -int rcar_du_planes_register(struct rcar_du_device *rcdu); +int rcar_du_planes_init(struct rcar_du_group *rgrp); +int rcar_du_planes_register(struct rcar_du_group *rgrp); void rcar_du_plane_setup(struct rcar_du_plane *plane); void rcar_du_plane_update_base(struct rcar_du_plane *plane); -- cgit v1.2.3 From 990d07a5a9582f14b4d6d13cde5311d6c694096a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 22:22:23 +0200 Subject: drm/rcar-du: Use dynamic number of CRTCs instead of CRTCs array size The rcar_du_device structure contains a field that stores the number of CRTCs, use it instead of the CRTCs array size. This prepares the driver to support a variable number of CRTCs. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index cdc363b6cd37..fd7cdda375b6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -105,7 +105,7 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) struct rcar_du_device *rcdu = dev->dev_private; unsigned int i; - for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) + for (i = 0; i < rcdu->num_crtcs; ++i) rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index c32e0f9d4823..845bcb384863 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -197,7 +197,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) if (pdata->type == RCAR_DU_ENCODER_UNUSED) continue; - if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { + if (pdata->output >= rcdu->num_crtcs) { dev_warn(rcdu->dev, "encoder %u references unexisting output %u, skipping\n", i, pdata->output); -- cgit v1.2.3 From 660bab56aa048c904a65ce6a8fc2eca2235eec6f Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 16 Jun 2013 23:48:10 +0200 Subject: drm/rcar-du: Remove register definitions for the second channel Channels are accessed through a global channel memory offset, there's no need to define register addresses for the second channel. 
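Every removed D2* definition sits exactly 0x30000 above its channel-0 counterpart, so one set of register definitions plus a per-channel base offset is enough. As an illustration only (this helper is a sketch, not part of the patch, and reuses the driver's existing rcar_du_read() accessor; note that the later multi-group rework maps the third channel at 0x40000, which is why the driver stores the offset per CRTC instead of deriving it from a fixed stride):

/* Sketch: channel 1 registers live 0x30000 above channel 0, so a
 * per-channel base offset replaces the duplicated D2* definitions.
 */
static u32 rcar_du_channel_read(struct rcar_du_device *rcdu,
				unsigned int channel, u32 reg)
{
	return rcar_du_read(rcdu, channel * 0x30000 + reg);
}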
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 3aba27ffc065..195ed7e1756e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -20,7 +20,6 @@ */ #define DSYSR 0x00000 /* display 1 */ -#define D2SYSR 0x30000 /* display 2 */ #define DSYSR_ILTS (1 << 29) #define DSYSR_DSEC (1 << 20) #define DSYSR_IUPD (1 << 16) @@ -35,7 +34,6 @@ #define DSYSR_SCM_INT_VIDEO (3 << 4) #define DSMR 0x00004 -#define D2SMR 0x30004 #define DSMR_VSPM (1 << 28) #define DSMR_ODPM (1 << 27) #define DSMR_DIPM_DISP (0 << 25) @@ -60,7 +58,6 @@ #define DSMR_CSY_MASK (3 << 6) #define DSSR 0x00008 -#define D2SSR 0x30008 #define DSSR_VC1FB_DSA0 (0 << 30) #define DSSR_VC1FB_DSA1 (1 << 30) #define DSSR_VC1FB_DSA2 (2 << 30) @@ -80,7 +77,6 @@ #define DSSR_ADC(n) (1 << ((n)-1)) #define DSRCR 0x0000c -#define D2SRCR 0x3000c #define DSRCR_TVCL (1 << 15) #define DSRCR_FRCL (1 << 14) #define DSRCR_VBCL (1 << 11) @@ -90,7 +86,6 @@ #define DSRCR_MASK 0x0000cbff #define DIER 0x00010 -#define D2IER 0x30010 #define DIER_TVE (1 << 15) #define DIER_FRE (1 << 14) #define DIER_VBE (1 << 11) @@ -114,7 +109,6 @@ #define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */ #define DEFR 0x00020 -#define D2EFR 0x30020 #define DEFR_CODE (0x7773 << 16) #define DEFR_EXSL (1 << 12) #define DEFR_EXVL (1 << 11) @@ -137,12 +131,10 @@ #define DCPCR_DCE (1 << 0) #define DEFR2 0x00034 -#define D2EFR2 0x30034 #define DEFR2_CODE (0x7775 << 16) #define DEFR2_DEFE2G (1 << 0) #define DEFR3 0x00038 -#define D2EFR3 0x30038 #define DEFR3_CODE (0x7776 << 16) #define DEFR3_EVDA (1 << 14) #define DEFR3_EVDM_1 (1 << 12) @@ -153,7 +145,6 @@ #define DEFR3_DEFE3 (1 << 0) #define DEFR4 0x0003c -#define D2EFR4 0x3003c #define DEFR4_CODE (0x7777 << 16) #define DEFR4_LRUO (1 << 5) #define DEFR4_SPCE (1 << 4) -- cgit v1.2.3 From 2fd22dba23e3847651bffa1d9cc37acea05cc351 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 00:11:05 +0200 Subject: drm/rcar-du: Move output routing configuration to group Output routing is configured in group registers, move the corresponding code from rcar_du_crtc.c to rcar_du_group.c. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 21 +-------------------- drivers/gpu/drm/rcar-du/rcar_du_group.c | 19 +++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_group.h | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 7784a3ba7854..6a2b9590bb74 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -130,25 +130,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); } -static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) -{ - struct rcar_du_device *rcdu = rcrtc->group->dev; - u32 dorcr = rcar_du_read(rcdu, DORCR); - - dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); - - /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and - * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by - * default. 
- */ - if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0) - dorcr |= DORCR_PG2D_DS1; - else - dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; - - rcar_du_write(rcdu, DORCR, dorcr); -} - void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -245,7 +226,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) /* Configure display timings and output routing */ rcar_du_crtc_set_display_timing(rcrtc); - rcar_du_crtc_set_routing(rcrtc); + rcar_du_group_set_routing(rcrtc->group); mutex_lock(&rcrtc->group->planes.lock); rcrtc->plane->enabled = true; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 625b9f446965..7e754515bba8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -125,3 +125,22 @@ void rcar_du_group_restart(struct rcar_du_group *rgrp) __rcar_du_group_start_stop(rgrp, false); __rcar_du_group_start_stop(rgrp, true); } + +void rcar_du_group_set_routing(struct rcar_du_group *rgrp) +{ + struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2]; + u32 dorcr = rcar_du_group_read(rgrp, DORCR); + + dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); + + /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and + * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by + * default. + */ + if (crtc0->outputs & (1 << 1)) + dorcr |= DORCR_PG2D_DS1; + else + dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; + + rcar_du_group_write(rgrp, DORCR, dorcr); +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 748331bbb8fe..180c739812c9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -42,6 +42,6 @@ int rcar_du_group_get(struct rcar_du_group *rgrp); void rcar_du_group_put(struct rcar_du_group *rgrp); void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); void rcar_du_group_restart(struct rcar_du_group *rgrp); - +void rcar_du_group_set_routing(struct rcar_du_group *rgrp); #endif /* __RCAR_DU_GROUP_H__ */ -- cgit v1.2.3 From ef2d84bec6a02c4536cab1e0a8f13792ad86a7bc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 14 Jun 2013 14:16:35 +0200 Subject: drm/rcar-du: Add support for the R8A7790 DU The DU revision in the R8A7790 SoC uses one IRQ and clock per CRTC. Add a corresponding entry in the module platform ID table. 
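The table entry added below stores a pointer to the per-SoC rcar_du_device_info in the driver_data field of the platform_device_id. At probe time the matched entry can be recovered with the standard platform helper; a minimal sketch of that lookup (the wrapper name is illustrative, not quoted from the driver):

#include <linux/platform_device.h>
#include "rcar_du_drv.h"

/* Sketch: fetch the rcar_du_device_info attached to the matched ID entry. */
static const struct rcar_du_device_info *
rcar_du_get_info(struct platform_device *pdev)
{
	return (const struct rcar_du_device_info *)
		platform_get_device_id(pdev)->driver_data;
}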
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 5 +++ drivers/gpu/drm/rcar-du/rcar_du_regs.h | 66 ++++++++++++++++++++++++++++++++-- 2 files changed, 68 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index fd7cdda375b6..381dbad05e58 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -220,8 +220,13 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { .features = 0, }; +static const struct rcar_du_device_info rcar_du_r8a7790_info = { + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK, +}; + static const struct platform_device_id rcar_du_id_table[] = { { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info }, + { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info }, { } }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 195ed7e1756e..f62a9f36041a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -195,6 +195,68 @@ #define DEFR6_MLOS1 (1 << 2) #define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) +/* ----------------------------------------------------------------------------- + * R8A7790-only Control Registers + */ + +#define DD1SSR 0x20008 +#define DD1SSR_TVR (1 << 15) +#define DD1SSR_FRM (1 << 14) +#define DD1SSR_BUF (1 << 12) +#define DD1SSR_VBK (1 << 11) +#define DD1SSR_RINT (1 << 9) +#define DD1SSR_HBK (1 << 8) +#define DD1SSR_ADC(n) (1 << ((n)-1)) + +#define DD1SRCR 0x2000c +#define DD1SRCR_TVR (1 << 15) +#define DD1SRCR_FRM (1 << 14) +#define DD1SRCR_BUF (1 << 12) +#define DD1SRCR_VBK (1 << 11) +#define DD1SRCR_RINT (1 << 9) +#define DD1SRCR_HBK (1 << 8) +#define DD1SRCR_ADC(n) (1 << ((n)-1)) + +#define DD1IER 0x20010 +#define DD1IER_TVR (1 << 15) +#define DD1IER_FRM (1 << 14) +#define DD1IER_BUF (1 << 12) +#define DD1IER_VBK (1 << 11) +#define DD1IER_RINT (1 << 9) +#define DD1IER_HBK (1 << 8) +#define DD1IER_ADC(n) (1 << ((n)-1)) + +#define DEFR8 0x20020 +#define DEFR8_CODE (0x7790 << 16) +#define DEFR8_VSCS (1 << 6) +#define DEFR8_DRGBS_DU(n) ((n) << 4) +#define DEFR8_DRGBS_MASK (3 << 4) +#define DEFR8_DEFE8 (1 << 0) + +#define DOFLR 0x20024 +#define DOFLR_CODE (0x7790 << 16) +#define DOFLR_HSYCFL1 (1 << 13) +#define DOFLR_VSYCFL1 (1 << 12) +#define DOFLR_ODDFL1 (1 << 11) +#define DOFLR_DISPFL1 (1 << 10) +#define DOFLR_CDEFL1 (1 << 9) +#define DOFLR_RGBFL1 (1 << 8) +#define DOFLR_HSYCFL0 (1 << 5) +#define DOFLR_VSYCFL0 (1 << 4) +#define DOFLR_ODDFL0 (1 << 3) +#define DOFLR_DISPFL0 (1 << 2) +#define DOFLR_CDEFL0 (1 << 1) +#define DOFLR_RGBFL0 (1 << 0) + +#define DIDSR 0x20028 +#define DIDSR_CODE (0x7790 << 16) +#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2)) +#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2)) +#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2)) +#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2)) +#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2)) +#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2)) + /* ----------------------------------------------------------------------------- * Display Timing Generation Registers */ @@ -364,12 +426,10 @@ * Display Capture Registers */ +#define DCMR 0x0c100 #define DCMWR 0x0c104 -#define DC2MWR 0x0c204 #define DCSAR 0x0c120 -#define DC2SAR 0x0c220 #define DCMLR 0x0c150 -#define DC2MLR 0x0c250 /* ----------------------------------------------------------------------------- * Color Palette Registers -- cgit v1.2.3 From 9e2d2de9e8107643ba50debc475fc966d3f77364 Mon Sep 17 00:00:00 2001 From: 
Laurent Pinchart Date: Fri, 14 Jun 2013 20:52:52 +0200 Subject: drm/rcar-du: Fix buffer pitch alignment for R8A7790 DU The R8A7790 DU seems to require a 128 bytes pitch alignment, even though the documentation only mentions a 16 pixels alignment as for the R8A7779 DU. Make this configurable through a device flag. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 1 + drivers/gpu/drm/rcar-du/rcar_du_kms.c | 17 ++++++++++++++--- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 381dbad05e58..3cd981589266 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -221,7 +221,7 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B, }; static const struct platform_device_id rcar_du_id_table[] = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 5b57a2f9b52a..072e28e09484 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -26,6 +26,7 @@ struct drm_device; struct rcar_du_device; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ +#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */ /* * struct rcar_du_device_info - DU model-specific information diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 845bcb384863..418d902bc88d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -111,11 +111,18 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { + struct rcar_du_device *rcdu = dev->dev_private; unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); unsigned int align; - /* The pitch must be aligned to a 16 pixels boundary. */ - align = 16 * args->bpp / 8; + /* The R8A7779 DU requires a 16 pixels pitch alignment as documented, + * but the R8A7790 DU seems to require a 128 bytes pitch alignment. + */ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B)) + align = 128; + else + align = 16 * args->bpp / 8; + args->pitch = roundup(max(args->pitch, min_pitch), align); return drm_gem_cma_dumb_create(file, dev, args); @@ -125,6 +132,7 @@ static struct drm_framebuffer * rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) { + struct rcar_du_device *rcdu = dev->dev_private; const struct rcar_du_format_info *format; unsigned int align; @@ -135,7 +143,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } - align = 16 * format->bpp / 8; + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B)) + align = 128; + else + align = 16 * format->bpp / 8; if (mode_cmd->pitches[0] & (align - 1) || mode_cmd->pitches[0] >= 8192) { -- cgit v1.2.3 From a5f0ef593c4a130f5f5cd4cd506af946e32dd509 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 00:29:25 +0200 Subject: drm/rcar-du: Add support for multiple groups The R8A7790 DU has 3 CRTCs, split in two groups. Support them.
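One detail worth keeping in mind while reading the diff below: CRTCs are packed two per group, so the group is selected by the CRTC index divided by two and the channel inside the group by the index modulo two (ESCR versus ESCR2, DS1PR versus DS2PR, first or second hardware plane set). A trivial sketch of the mapping (illustrative helpers, not part of the patch):

/* CRTCs 0 and 1 belong to group 0, CRTC 2 to group 1. */
static struct rcar_du_group *rcar_du_crtc_to_group(struct rcar_du_device *rcdu,
						   unsigned int index)
{
	return &rcdu->groups[index / 2];
}

/* Position of the CRTC inside its group: 0 or 1. */
static unsigned int rcar_du_crtc_channel(unsigned int index)
{
	return index % 2;
}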
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 25 ++++++++++--------- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2 ++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 6 +++-- drivers/gpu/drm/rcar-du/rcar_du_group.c | 4 +-- drivers/gpu/drm/rcar-du/rcar_du_group.h | 3 +++ drivers/gpu/drm/rcar-du/rcar_du_kms.c | 43 ++++++++++++++++++++++++--------- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 6 +++-- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 4 ++- 8 files changed, 63 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 6a2b9590bb74..a340224e08e6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -91,7 +91,6 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { const struct drm_display_mode *mode = &rcrtc->crtc.mode; - struct rcar_du_device *rcdu = rcrtc->group->dev; unsigned long clk; u32 value; u32 div; @@ -101,9 +100,9 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000); div = clamp(div, 1U, 64U) - 1; - rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR, - ESCR_DCLKSEL_CLKS | div); - rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0); + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, + ESCR_DCLKSEL_CLKS | div); + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); /* Signal polarities */ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) @@ -143,7 +142,6 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output) void rcar_du_crtc_update_planes(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - struct rcar_du_device *rcdu = rcrtc->group->dev; struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; unsigned int num_planes = 0; unsigned int prio = 0; @@ -189,8 +187,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) /* Select display timing and dot clock generator 2 for planes associated * with superposition controller 2. */ - if (rcrtc->index) { - u32 value = rcar_du_read(rcdu, DPTSR); + if (rcrtc->index % 2) { + u32 value = rcar_du_group_read(rcrtc->group, DPTSR); /* The DPTSR register is updated when the display controller is * stopped. We thus need to restart the DU. Once again, sorry @@ -200,13 +198,14 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) * occur only if we need to break the pre-association. */ if (value != dptsr) { - rcar_du_write(rcdu, DPTSR, dptsr); + rcar_du_group_write(rcrtc->group, DPTSR, dptsr); if (rcrtc->group->used_crtcs) rcar_du_group_restart(rcrtc->group); } } - rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr); + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, + dspr); } static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) @@ -528,6 +527,10 @@ static const struct drm_crtc_funcs crtc_funcs = { int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) { + static const unsigned int mmio_offsets[] = { + DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET + }; + struct rcar_du_device *rcdu = rgrp->dev; struct platform_device *pdev = to_platform_device(rcdu->dev); struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; @@ -553,10 +556,10 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) } rcrtc->group = rgrp; - rcrtc->mmio_offset = index ? 
DISP2_REG_OFFSET : 0; + rcrtc->mmio_offset = mmio_offsets[index]; rcrtc->index = index; rcrtc->dpms = DRM_MODE_DPMS_OFF; - rcrtc->plane = &rgrp->planes.planes[index]; + rcrtc->plane = &rgrp->planes.planes[index % 2]; rcrtc->plane->crtc = crtc; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 3cd981589266..8694a4648860 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -218,10 +218,12 @@ static int rcar_du_remove(struct platform_device *pdev) static const struct rcar_du_device_info rcar_du_r8a7779_info = { .features = 0, + .num_crtcs = 2, }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B, + .num_crtcs = 3, }; static const struct platform_device_id rcar_du_id_table[] = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 072e28e09484..160e5eb8f29d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -31,9 +31,11 @@ struct rcar_du_device; /* * struct rcar_du_device_info - DU model-specific information * @features: device features (RCAR_DU_FEATURE_*) + * @num_crtcs: total number of CRTCs */ struct rcar_du_device_info { unsigned int features; + unsigned int num_crtcs; }; struct rcar_du_device { @@ -45,10 +47,10 @@ struct rcar_du_device { struct drm_device *ddev; - struct rcar_du_crtc crtcs[2]; + struct rcar_du_crtc crtcs[3]; unsigned int num_crtcs; - struct rcar_du_group group; + struct rcar_du_group groups[2]; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 7e754515bba8..0eb106efffc9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -33,12 +33,12 @@ #include "rcar_du_group.h" #include "rcar_du_regs.h" -static u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg) +u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg) { return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg); } -static void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data) +void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data) { rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 180c739812c9..4487e83fb2c1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -38,6 +38,9 @@ struct rcar_du_group { struct rcar_du_planes planes; }; +u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg); +void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data); + int rcar_du_group_get(struct rcar_du_group *rgrp); void rcar_du_group_put(struct rcar_du_group *rgrp); void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 418d902bc88d..816963ca1626 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -172,8 +172,13 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { int rcar_du_modeset_init(struct rcar_du_device *rcdu) { + static const unsigned int mmio_offsets[] = { + DU0_REG_OFFSET, DU2_REG_OFFSET + }; + struct drm_device *dev = rcdu->ddev; struct drm_encoder *encoder; + unsigned int num_groups; unsigned int i; int 
ret; @@ -185,22 +190,33 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rcdu->ddev->mode_config.max_height = 2047; rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; - rcdu->group.dev = rcdu; - rcdu->group.index = 0; - rcdu->group.used_crtcs = 0; + rcdu->num_crtcs = rcdu->info->num_crtcs; + + /* Initialize the groups. */ + num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2); + + for (i = 0; i < num_groups; ++i) { + struct rcar_du_group *rgrp = &rcdu->groups[i]; - ret = rcar_du_planes_init(&rcdu->group); - if (ret < 0) - return ret; + rgrp->dev = rcdu; + rgrp->mmio_offset = mmio_offsets[i]; + rgrp->index = i; - for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) { - ret = rcar_du_crtc_create(&rcdu->group, i); + ret = rcar_du_planes_init(rgrp); if (ret < 0) return ret; } - rcdu->num_crtcs = i; + /* Create the CRTCs. */ + for (i = 0; i < rcdu->num_crtcs; ++i) { + struct rcar_du_group *rgrp = &rcdu->groups[i / 2]; + + ret = rcar_du_crtc_create(rgrp, i); + if (ret < 0) + return ret; + } + /* Initialize the encoders. */ for (i = 0; i < rcdu->pdata->num_encoders; ++i) { const struct rcar_du_encoder_data *pdata = &rcdu->pdata->encoders[i]; @@ -229,9 +245,12 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = 1 << 0; } - ret = rcar_du_planes_register(&rcdu->group); - if (ret < 0) - return ret; + /* Now that the CRTCs have been initialized register the planes. */ + for (i = 0; i < num_groups; ++i) { + ret = rcar_du_planes_register(&rcdu->groups[i]); + if (ret < 0) + return ret; + } drm_kms_helper_poll_init(rcdu->ddev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 1e9cf7c92f8e..53000644733f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -480,9 +480,12 @@ int rcar_du_planes_register(struct rcar_du_group *rgrp) { struct rcar_du_planes *planes = &rgrp->planes; struct rcar_du_device *rcdu = rgrp->dev; + unsigned int crtcs; unsigned int i; int ret; + crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index)); + for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { struct rcar_du_kms_plane *plane; @@ -493,8 +496,7 @@ int rcar_du_planes_register(struct rcar_du_group *rgrp) plane->hwplane = &planes->planes[i + 2]; plane->hwplane->zpos = 1; - ret = drm_plane_init(rcdu->ddev, &plane->plane, - (1 << rcdu->num_crtcs) - 1, + ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs, &rcar_du_plane_funcs, formats, ARRAY_SIZE(formats), false); if (ret < 0) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index f62a9f36041a..73f7347f740b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -13,7 +13,9 @@ #ifndef __RCAR_DU_REGS_H__ #define __RCAR_DU_REGS_H__ -#define DISP2_REG_OFFSET 0x30000 +#define DU0_REG_OFFSET 0x00000 +#define DU1_REG_OFFSET 0x30000 +#define DU2_REG_OFFSET 0x40000 /* ----------------------------------------------------------------------------- * Display Control Registers -- cgit v1.2.3 From 38b62fb3808e6b57dbd7728e897e4f7674d1c998 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 15 Jun 2013 02:40:57 +0200 Subject: drm/rcar-du: Add support for DEFR8 register The R8A7790 DU has a new extended function control register. Support it. 
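The guard added below relies on rcar_du_has(), whose body is not shown in this series; it is presumably a one-line test of the per-SoC feature bitmask, along the lines of this sketch (an assumption, not code quoted from the driver):

/* Assumed shape of the feature test: true when the matched
 * rcar_du_device_info advertises the given RCAR_DU_FEATURE_* bit.
 */
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
			       unsigned int feature)
{
	return rcdu->info->features & feature;
}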
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 3 ++- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 1 + drivers/gpu/drm/rcar-du/rcar_du_group.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 8694a4648860..f8785357b599 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -222,7 +222,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B + | RCAR_DU_FEATURE_DEFR8, .num_crtcs = 3, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 160e5eb8f29d..70c335f51136 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -27,6 +27,7 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ #define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */ +#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */ /* * struct rcar_du_device_info - DU model-specific information diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 0eb106efffc9..f3ba0ca845e2 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -51,6 +51,8 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3); rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE); rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5); + if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8)) + rcar_du_group_write(rgrp, DEFR8, DEFR8_CODE | DEFR8_DEFE8); /* Use DS1PR and DS2PR to configure planes priorities and connects the * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. -- cgit v1.2.3 From ef67a902e946ad1ef51040cf287a45cc4714e2b5 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 03:13:11 +0200 Subject: drm/rcar-du: Rework output routing support Split the output routing specification between SoC-internal data, specified in the rcar_du_device_info structure, and board data, passed through platform data. The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). SoC-internal output routing data specify which output are valid, which CRTCs can be connected to the valid outputs, and the type of in-SoC encoder for the output. Platform data then specifies external encoders and the output they are connected to. 
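To make the split concrete, a board file now only names its external encoders and the SoC output each one is wired to, while the CRTCs allowed to drive that output come from the routes[] table of the matched rcar_du_device_info. A hypothetical board fragment using the platform data types introduced below (the array and its contents are invented for illustration):

/* Hypothetical board description: a VGA DAC on DPAD0 and an LVDS panel
 * on LVDS0. The possible CRTCs for each output are no longer listed
 * here; they come from the per-SoC route table.
 */
static struct rcar_du_encoder_data board_du_encoders[] = {
	{
		.type = RCAR_DU_ENCODER_VGA,
		.output = RCAR_DU_OUTPUT_DPAD0,
	},
	{
		.type = RCAR_DU_ENCODER_LVDS,
		.output = RCAR_DU_OUTPUT_LVDS0,
		/* .connector.lvds would carry the panel description. */
	},
};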
Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 6 ++++-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 4 +++- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 30 ++++++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 16 ++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 26 +++++++++++++++++++++----- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 5 +++-- drivers/gpu/drm/rcar-du/rcar_du_group.c | 8 ++++---- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 17 +++++++++++------ include/linux/platform_data/rcar-du.h | 17 +++++++++++++++-- 9 files changed, 107 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index a340224e08e6..680606ef11d8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -129,14 +129,16 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); } -void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output) +void rcar_du_crtc_route_output(struct drm_crtc *crtc, + enum rcar_du_output output) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct rcar_du_device *rcdu = rcrtc->group->dev; /* Store the route from the CRTC output to the DU output. The DU will be * configured when starting the CRTC. */ - rcrtc->outputs |= 1 << output; + rcrtc->outputs |= BIT(output); } void rcar_du_crtc_update_planes(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 542a7feceb20..39a983d13afb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -15,6 +15,7 @@ #define __RCAR_DU_CRTC_H__ #include +#include #include #include @@ -45,7 +46,8 @@ void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); -void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output); +void rcar_du_crtc_route_output(struct drm_crtc *crtc, + enum rcar_du_output output); void rcar_du_crtc_update_planes(struct drm_crtc *crtc); #endif /* __RCAR_DU_CRTC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index f8785357b599..4bc399734490 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -219,12 +219,42 @@ static int rcar_du_remove(struct platform_device *pdev) static const struct rcar_du_device_info rcar_du_r8a7779_info = { .features = 0, .num_crtcs = 2, + .routes = { + /* R8A7779 has two RGB outputs and one (currently unsupported) + * TCON output. + */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(0), + .encoder_type = DRM_MODE_ENCODER_NONE, + }, + [RCAR_DU_OUTPUT_DPAD1] = { + .possible_crtcs = BIT(1) | BIT(0), + .encoder_type = DRM_MODE_ENCODER_NONE, + }, + }, }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B | RCAR_DU_FEATURE_DEFR8, .num_crtcs = 3, + .routes = { + /* R8A7790 has one RGB output, two LVDS outputs and one + * (currently unsupported) TCON output. 
+ */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(2) | BIT(1) | BIT(0), + .encoder_type = DRM_MODE_ENCODER_NONE, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .encoder_type = DRM_MODE_ENCODER_LVDS, + }, + [RCAR_DU_OUTPUT_LVDS1] = { + .possible_crtcs = BIT(2) | BIT(1), + .encoder_type = DRM_MODE_ENCODER_LVDS, + }, + }, }; static const struct platform_device_id rcar_du_id_table[] = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 70c335f51136..d5243f493903 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -29,14 +29,30 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */ #define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */ +/* + * struct rcar_du_output_routing - Output routing specification + * @possible_crtcs: bitmask of possible CRTCs for the output + * @encoder_type: DRM type of the internal encoder associated with the output + * + * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data + * specify the valid SoC outputs, which CRTCs can drive the output, and the type + * of in-SoC encoder for the output. + */ +struct rcar_du_output_routing { + unsigned int possible_crtcs; + unsigned int encoder_type; +}; + /* * struct rcar_du_device_info - DU model-specific information * @features: device features (RCAR_DU_FEATURE_*) * @num_crtcs: total number of CRTCs + * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) */ struct rcar_du_device_info { unsigned int features; unsigned int num_crtcs; + struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; }; struct rcar_du_device { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 0d0375c7ee44..2aac28d21f87 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -115,10 +115,12 @@ static const struct drm_encoder_funcs encoder_funcs = { }; int rcar_du_encoder_init(struct rcar_du_device *rcdu, - enum rcar_du_encoder_type type, unsigned int output, + enum rcar_du_encoder_type type, + enum rcar_du_output output, const struct rcar_du_encoder_data *data) { struct rcar_du_encoder *renc; + unsigned int encoder_type; int ret; renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); @@ -127,19 +129,33 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, renc->output = output; + switch (type) { + case RCAR_DU_ENCODER_VGA: + encoder_type = DRM_MODE_ENCODER_DAC; + break; + case RCAR_DU_ENCODER_LVDS: + encoder_type = DRM_MODE_ENCODER_LVDS; + break; + case RCAR_DU_ENCODER_NONE: + default: + /* No external encoder, use the internal encoder type. 
*/ + encoder_type = rcdu->info->routes[output].encoder_type; + break; + } + ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs, - type); + encoder_type); if (ret < 0) return ret; drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); - switch (type) { - case RCAR_DU_ENCODER_LVDS: + switch (encoder_type) { + case DRM_MODE_ENCODER_LVDS: return rcar_du_lvds_connector_init(rcdu, renc, &data->connector.lvds.panel); - case RCAR_DU_ENCODER_VGA: + case DRM_MODE_ENCODER_DAC: return rcar_du_vga_connector_init(rcdu, renc); default: diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 08cde1293892..2310416ea21f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -22,7 +22,7 @@ struct rcar_du_device; struct rcar_du_encoder { struct drm_encoder encoder; - unsigned int output; + enum rcar_du_output output; }; #define to_rcar_encoder(e) \ @@ -40,7 +40,8 @@ struct drm_encoder * rcar_du_connector_best_encoder(struct drm_connector *connector); int rcar_du_encoder_init(struct rcar_du_device *rcdu, - enum rcar_du_encoder_type type, unsigned int output, + enum rcar_du_encoder_type type, + enum rcar_du_output output, const struct rcar_du_encoder_data *data); #endif /* __RCAR_DU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index f3ba0ca845e2..9df6fb635c96 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -135,11 +135,11 @@ void rcar_du_group_set_routing(struct rcar_du_group *rgrp) dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); - /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and - * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by - * default. + /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and + * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1 + * by default. */ - if (crtc0->outputs & (1 << 1)) + if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1)) dorcr |= DORCR_PG2D_DS1; else dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 816963ca1626..2b92e68a09f0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -220,11 +220,14 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) for (i = 0; i < rcdu->pdata->num_encoders; ++i) { const struct rcar_du_encoder_data *pdata = &rcdu->pdata->encoders[i]; + const struct rcar_du_output_routing *route = + &rcdu->info->routes[pdata->output]; if (pdata->type == RCAR_DU_ENCODER_UNUSED) continue; - if (pdata->output >= rcdu->num_crtcs) { + if (pdata->output >= RCAR_DU_OUTPUT_MAX || + route->possible_crtcs == 0) { dev_warn(rcdu->dev, "encoder %u references unexisting output %u, skipping\n", i, pdata->output); @@ -234,15 +237,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata); } - /* Set the possible CRTCs and possible clones. All encoders can be - * driven by the CRTC associated with the output they're connected to, - * as well as by CRTC 0. + /* Set the possible CRTCs and possible clones. There's always at least + * one way for all encoders to clone each other, set all bits in the + * possible clones field. 
*/ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + const struct rcar_du_output_routing *route = + &rcdu->info->routes[renc->output]; - encoder->possible_crtcs = (1 << 0) | (1 << renc->output); - encoder->possible_clones = 1 << 0; + encoder->possible_crtcs = route->possible_crtcs; + encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1; } /* Now that the CRTCs have been initialized register the planes. */ diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h index 64cd8635e6e6..1a2e9901a22e 100644 --- a/include/linux/platform_data/rcar-du.h +++ b/include/linux/platform_data/rcar-du.h @@ -16,8 +16,18 @@ #include +enum rcar_du_output { + RCAR_DU_OUTPUT_DPAD0, + RCAR_DU_OUTPUT_DPAD1, + RCAR_DU_OUTPUT_LVDS0, + RCAR_DU_OUTPUT_LVDS1, + RCAR_DU_OUTPUT_TCON, + RCAR_DU_OUTPUT_MAX, +}; + enum rcar_du_encoder_type { RCAR_DU_ENCODER_UNUSED = 0, + RCAR_DU_ENCODER_NONE, RCAR_DU_ENCODER_VGA, RCAR_DU_ENCODER_LVDS, }; @@ -39,13 +49,16 @@ struct rcar_du_connector_vga_data { /* * struct rcar_du_encoder_data - Encoder platform data * @type: the encoder type (RCAR_DU_ENCODER_*) - * @output: the DU output the connector is connected to + * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*) * @connector.lvds: platform data for LVDS connectors * @connector.vga: platform data for VGA connectors + * + * Encoder platform data describes an on-board encoder, its associated DU SoC + * output, and the connector. */ struct rcar_du_encoder_data { enum rcar_du_encoder_type type; - unsigned int output; + enum rcar_du_output output; union { struct rcar_du_connector_lvds_data lvds; -- cgit v1.2.3 From 7cbc05cb518304b746bea00bc7c0b005217bcaf7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 03:20:08 +0200 Subject: drm/rcar-du: Configure RGB output routing to DPAD0 The R8A7790 DU variant has a single RGB output called DPAD0 that can be fed with the output of DU0, DU1 or DU2. Making the routing configurable. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 5 ++++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2 ++ drivers/gpu/drm/rcar-du/rcar_du_group.c | 45 ++++++++++++++++++++++++++++++--- drivers/gpu/drm/rcar-du/rcar_du_group.h | 2 +- 4 files changed, 50 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 680606ef11d8..245800ddd1a8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -139,6 +139,11 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, * configured when starting the CRTC. */ rcrtc->outputs |= BIT(output); + + /* Store RGB routing to DPAD0 for R8A7790. 
*/ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) && + output == RCAR_DU_OUTPUT_DPAD0) + rcdu->dpad0_source = rcrtc->index; } void rcar_du_crtc_update_planes(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index d5243f493903..924f5e08f060 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -68,6 +68,8 @@ struct rcar_du_device { unsigned int num_crtcs; struct rcar_du_group groups[2]; + + unsigned int dpad0_source; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 9df6fb635c96..eb53cd97e8c6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -27,6 +27,7 @@ * counterpart in the DU documentation, that models those semi-global resources. */ +#include #include #include "rcar_du_drv.h" @@ -43,6 +44,22 @@ void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data) rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data); } +static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) +{ + u32 defr8 = DEFR8_CODE | DEFR8_DEFE8; + + if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8)) + return; + + /* The DEFR8 register for the first group also controls RGB output + * routing to DPAD0 + */ + if (rgrp->index == 0) + defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source); + + rcar_du_group_write(rgrp, DEFR8, defr8); +} + static void rcar_du_group_setup(struct rcar_du_group *rgrp) { /* Enable extended features */ @@ -51,8 +68,8 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3); rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE); rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5); - if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8)) - rcar_du_group_write(rgrp, DEFR8, DEFR8_CODE | DEFR8_DEFE8); + + rcar_du_group_setup_defr8(rgrp); /* Use DS1PR and DS2PR to configure planes priorities and connects the * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. @@ -128,7 +145,27 @@ void rcar_du_group_restart(struct rcar_du_group *rgrp) __rcar_du_group_start_stop(rgrp, true); } -void rcar_du_group_set_routing(struct rcar_du_group *rgrp) +static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu) +{ + int ret; + + /* RGB output routing to DPAD0 is configured in the DEFR8 register of + * the first group. As this function can be called with the DU0 and DU1 + * CRTCs disabled, we need to enable the first group clock before + * accessing the register. 
+ */ + ret = clk_prepare_enable(rcdu->crtcs[0].clock); + if (ret < 0) + return ret; + + rcar_du_group_setup_defr8(&rcdu->groups[0]); + + clk_disable_unprepare(rcdu->crtcs[0].clock); + + return 0; +} + +int rcar_du_group_set_routing(struct rcar_du_group *rgrp) { struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2]; u32 dorcr = rcar_du_group_read(rgrp, DORCR); @@ -145,4 +182,6 @@ void rcar_du_group_set_routing(struct rcar_du_group *rgrp) dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; rcar_du_group_write(rgrp, DORCR, dorcr); + + return rcar_du_set_dpad0_routing(rgrp->dev); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 4487e83fb2c1..5025930972ec 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -45,6 +45,6 @@ int rcar_du_group_get(struct rcar_du_group *rgrp); void rcar_du_group_put(struct rcar_du_group *rgrp); void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); void rcar_du_group_restart(struct rcar_du_group *rgrp); -void rcar_du_group_set_routing(struct rcar_du_group *rgrp); +int rcar_du_group_set_routing(struct rcar_du_group *rgrp); #endif /* __RCAR_DU_GROUP_H__ */ -- cgit v1.2.3 From 90374b5c25c9f04895c52a1e7a2468ee8dac525b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 17 Jun 2013 13:48:27 +0200 Subject: drm/rcar-du: Add internal LVDS encoder support The R8A7790 includes two internal LVDS encoders. Support them in the DU driver. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Kconfig | 7 ++ drivers/gpu/drm/rcar-du/Makefile | 4 +- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2 - drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 2 + drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2 + drivers/gpu/drm/rcar-du/rcar_du_drv.h | 4 + drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 38 ++++++ drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 2 + drivers/gpu/drm/rcar-du/rcar_du_kms.c | 5 + drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 196 ++++++++++++++++++++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 46 +++++++ drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 69 +++++++++++ 12 files changed, 374 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c create mode 100644 drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h create mode 100644 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 72887df8dd76..c590cd9dca0b 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -7,3 +7,10 @@ config DRM_RCAR_DU help Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. + +config DRM_RCAR_LVDS + bool "R-Car DU LVDS Encoder Support" + depends on DRM_RCAR_DU + help + Enable support the R-Car Display Unit embedded LVDS encoders + (currently only on R8A7790). 
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index b9b5e666fbba..12b8d4477835 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -7,4 +7,6 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_plane.o \ rcar_du_vgacon.o -obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o +rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o + +obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 245800ddd1a8..33df7a583143 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -26,8 +26,6 @@ #include "rcar_du_plane.h" #include "rcar_du_regs.h" -#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) - static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) { struct rcar_du_device *rcdu = rcrtc->group->dev; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 39a983d13afb..43e7575c700c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -39,6 +39,8 @@ struct rcar_du_crtc { struct rcar_du_plane *plane; }; +#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) + int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 4bc399734490..38a8b52624ce 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -232,6 +232,7 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { .encoder_type = DRM_MODE_ENCODER_NONE, }, }, + .num_lvds = 0, }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { @@ -255,6 +256,7 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { .encoder_type = DRM_MODE_ENCODER_LVDS, }, }, + .num_lvds = 2, }; static const struct platform_device_id rcar_du_id_table[] = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 924f5e08f060..050d71c1f785 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -24,6 +24,7 @@ struct clk; struct device; struct drm_device; struct rcar_du_device; +struct rcar_du_lvdsenc; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ #define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */ @@ -48,11 +49,13 @@ struct rcar_du_output_routing { * @features: device features (RCAR_DU_FEATURE_*) * @num_crtcs: total number of CRTCs * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) + * @num_lvds: number of internal LVDS encoders */ struct rcar_du_device_info { unsigned int features; unsigned int num_crtcs; struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; + unsigned int num_lvds; }; struct rcar_du_device { @@ -70,6 +73,7 @@ struct rcar_du_device { struct rcar_du_group groups[2]; unsigned int dpad0_source; + struct rcar_du_lvdsenc *lvds[2]; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 2aac28d21f87..3daa7a168dc6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -11,6 +11,8 @@ * (at your option) any later 
version. */ +#include + #include #include #include @@ -19,6 +21,7 @@ #include "rcar_du_encoder.h" #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" +#include "rcar_du_lvdsenc.h" #include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- @@ -39,12 +42,17 @@ rcar_du_connector_best_encoder(struct drm_connector *connector) static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode) { + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + + if (renc->lvds) + rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode); } static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); const struct drm_display_mode *panel_mode; struct drm_device *dev = encoder->dev; struct drm_connector *connector; @@ -82,15 +90,32 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, /* The flat panel mode is fixed, just copy it to the adjusted mode. */ drm_mode_copy(adjusted_mode, panel_mode); + /* The internal LVDS encoder has a clock frequency operating range of + * 30MHz to 150MHz. Clamp the clock accordingly. + */ + if (renc->lvds) + adjusted_mode->clock = clamp(adjusted_mode->clock, + 30000, 150000); + return true; } static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder) { + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + + if (renc->lvds) + rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, + DRM_MODE_DPMS_OFF); } static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder) { + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + + if (renc->lvds) + rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, + DRM_MODE_DPMS_ON); } static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, @@ -129,6 +154,19 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, renc->output = output; + switch (output) { + case RCAR_DU_OUTPUT_LVDS0: + renc->lvds = rcdu->lvds[0]; + break; + + case RCAR_DU_OUTPUT_LVDS1: + renc->lvds = rcdu->lvds[1]; + break; + + default: + break; + } + switch (type) { case RCAR_DU_ENCODER_VGA: encoder_type = DRM_MODE_ENCODER_DAC; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 2310416ea21f..0e5a65e45d0e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -19,10 +19,12 @@ #include struct rcar_du_device; +struct rcar_du_lvdsenc; struct rcar_du_encoder { struct drm_encoder encoder; enum rcar_du_output output; + struct rcar_du_lvdsenc *lvds; }; #define to_rcar_encoder(e) \ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 2b92e68a09f0..cc71b1a0c3ce 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -21,6 +21,7 @@ #include "rcar_du_drv.h" #include "rcar_du_encoder.h" #include "rcar_du_kms.h" +#include "rcar_du_lvdsenc.h" #include "rcar_du_regs.h" /* ----------------------------------------------------------------------------- @@ -217,6 +218,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) } /* Initialize the encoders. 
*/ + ret = rcar_du_lvdsenc_init(rcdu); + if (ret < 0) + return ret; + for (i = 0; i < rcdu->pdata->num_encoders; ++i) { const struct rcar_du_encoder_data *pdata = &rcdu->pdata->encoders[i]; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c new file mode 100644 index 000000000000..a0f6a1781925 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -0,0 +1,196 @@ +/* + * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#include "rcar_du_drv.h" +#include "rcar_du_encoder.h" +#include "rcar_du_lvdsenc.h" +#include "rcar_lvds_regs.h" + +struct rcar_du_lvdsenc { + struct rcar_du_device *dev; + + unsigned int index; + void __iomem *mmio; + struct clk *clock; + int dpms; + + enum rcar_lvds_input input; +}; + +static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data) +{ + iowrite32(data, lvds->mmio + reg); +} + +static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + struct rcar_du_crtc *rcrtc) +{ + const struct drm_display_mode *mode = &rcrtc->crtc.mode; + unsigned int freq = mode->clock; + u32 lvdcr0; + u32 pllcr; + int ret; + + if (lvds->dpms == DRM_MODE_DPMS_ON) + return 0; + + ret = clk_prepare_enable(lvds->clock); + if (ret < 0) + return ret; + + /* PLL clock configuration */ + if (freq <= 38000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; + else if (freq <= 60000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; + else if (freq <= 121000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; + else + pllcr = LVDPLLCR_PLLDLYCNT_150M; + + rcar_lvds_write(lvds, LVDPLLCR, pllcr); + + /* Hardcode the channels and control signals routing for now. + * + * HSYNC -> CTRL0 + * VSYNC -> CTRL1 + * DISP -> CTRL2 + * 0 -> CTRL3 + * + * Channels 1 and 3 are switched on ES1. + */ + rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO | + LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC | + LVDCTRCR_CTR0SEL_HSYNC); + rcar_lvds_write(lvds, LVDCHCR, + LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) | + LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1)); + + /* Select the input, hardcode mode 0, enable LVDS operation and turn + * bias circuitry on. + */ + lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN; + if (rcrtc->index == 2) + lvdcr0 |= LVDCR0_DUSEL; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + /* Turn all the channels on. */ + rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) | + LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY); + + /* Turn the PLL on, wait for the startup delay, and turn the output + * on. 
+ */ + lvdcr0 |= LVDCR0_PLLEN; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); + + lvdcr0 |= LVDCR0_LVRES; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + lvds->dpms = DRM_MODE_DPMS_ON; + return 0; +} + +static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds) +{ + if (lvds->dpms == DRM_MODE_DPMS_OFF) + return; + + rcar_lvds_write(lvds, LVDCR0, 0); + rcar_lvds_write(lvds, LVDCR1, 0); + + clk_disable_unprepare(lvds->clock); + + lvds->dpms = DRM_MODE_DPMS_OFF; +} + +int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, + struct drm_crtc *crtc, int mode) +{ + if (mode == DRM_MODE_DPMS_OFF) { + rcar_du_lvdsenc_stop(lvds); + return 0; + } else if (crtc) { + struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + return rcar_du_lvdsenc_start(lvds, rcrtc); + } else + return -EINVAL; +} + +static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds, + struct platform_device *pdev) +{ + struct resource *mem; + char name[7]; + + sprintf(name, "lvds.%u", lvds->index); + + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (mem == NULL) { + dev_err(&pdev->dev, "failed to get memory resource for %s\n", + name); + return -EINVAL; + } + + lvds->mmio = devm_ioremap_resource(&pdev->dev, mem); + if (lvds->mmio == NULL) { + dev_err(&pdev->dev, "failed to remap memory resource for %s\n", + name); + return -ENOMEM; + } + + lvds->clock = devm_clk_get(&pdev->dev, name); + if (IS_ERR(lvds->clock)) { + dev_err(&pdev->dev, "failed to get clock for %s\n", name); + return PTR_ERR(lvds->clock); + } + + return 0; +} + +int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) +{ + struct platform_device *pdev = to_platform_device(rcdu->dev); + struct rcar_du_lvdsenc *lvds; + unsigned int i; + int ret; + + for (i = 0; i < rcdu->info->num_lvds; ++i) { + lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); + if (lvds == NULL) { + dev_err(&pdev->dev, "failed to allocate private data\n"); + return -ENOMEM; + } + + lvds->dev = rcdu; + lvds->index = i; + lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0; + lvds->dpms = DRM_MODE_DPMS_OFF; + + ret = rcar_du_lvdsenc_get_resources(lvds, pdev); + if (ret < 0) + return ret; + + rcdu->lvds[i] = lvds; + } + + return 0; +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h new file mode 100644 index 000000000000..7051c6de19ae --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h @@ -0,0 +1,46 @@ +/* + * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __RCAR_DU_LVDSENC_H__ +#define __RCAR_DU_LVDSENC_H__ + +#include +#include +#include + +struct rcar_drm_crtc; +struct rcar_du_lvdsenc; + +enum rcar_lvds_input { + RCAR_LVDS_INPUT_DU0, + RCAR_LVDS_INPUT_DU1, + RCAR_LVDS_INPUT_DU2, +}; + +#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS) +int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu); +int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, + struct drm_crtc *crtc, int mode); +#else +static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) +{ + return 0; +} +static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, + struct drm_crtc *crtc, int mode) +{ + return 0; +} +#endif + +#endif /* __RCAR_DU_LVDSENC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h new file mode 100644 index 000000000000..77cf9289ab65 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h @@ -0,0 +1,69 @@ +/* + * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions + * + * Copyright (C) 2013 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef __RCAR_LVDS_REGS_H__ +#define __RCAR_LVDS_REGS_H__ + +#define LVDCR0 0x0000 +#define LVDCR0_DUSEL (1 << 15) +#define LVDCR0_DMD (1 << 12) +#define LVDCR0_LVMD_MASK (0xf << 8) +#define LVDCR0_LVMD_SHIFT 8 +#define LVDCR0_PLLEN (1 << 4) +#define LVDCR0_BEN (1 << 2) +#define LVDCR0_LVEN (1 << 1) +#define LVDCR0_LVRES (1 << 0) + +#define LVDCR1 0x0004 +#define LVDCR1_CKSEL (1 << 15) +#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2)) +#define LVDCR1_CLKSTBY (3 << 0) + +#define LVDPLLCR 0x0008 +#define LVDPLLCR_CEEN (1 << 14) +#define LVDPLLCR_FBEN (1 << 13) +#define LVDPLLCR_COSEL (1 << 12) +#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0) +#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0) +#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0) +#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0) +#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0) + +#define LVDCTRCR 0x000c +#define LVDCTRCR_CTR3SEL_ZERO (0 << 12) +#define LVDCTRCR_CTR3SEL_ODD (1 << 12) +#define LVDCTRCR_CTR3SEL_CDE (2 << 12) +#define LVDCTRCR_CTR3SEL_MASK (7 << 12) +#define LVDCTRCR_CTR2SEL_DISP (0 << 8) +#define LVDCTRCR_CTR2SEL_ODD (1 << 8) +#define LVDCTRCR_CTR2SEL_CDE (2 << 8) +#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8) +#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8) +#define LVDCTRCR_CTR2SEL_MASK (7 << 8) +#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4) +#define LVDCTRCR_CTR1SEL_DISP (1 << 4) +#define LVDCTRCR_CTR1SEL_ODD (2 << 4) +#define LVDCTRCR_CTR1SEL_CDE (3 << 4) +#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4) +#define LVDCTRCR_CTR1SEL_MASK (7 << 4) +#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0) +#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0) +#define LVDCTRCR_CTR0SEL_DISP (2 << 0) +#define LVDCTRCR_CTR0SEL_ODD (3 << 0) +#define LVDCTRCR_CTR0SEL_CDE (4 << 0) +#define LVDCTRCR_CTR0SEL_MASK (7 << 0) + +#define LVDCHCR 0x0010 +#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4)) +#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4)) + +#endif /* __RCAR_LVDS_REGS_H__ */ -- cgit v1.2.3 From 3864c6f446f3c2ebbeca1d45e28452682706c1aa Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 14 Mar 2013 22:45:22 +0100 Subject: drm/rcar-du: Add FBDEV emulation support Use the FB CMA helpers to implement FBDEV emulation support. 
The VGA connector status must be reported as connector_status_connected instead of connector_status_unknown to be usable by the emulation layer. Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 14 +++++++++++++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2 ++ drivers/gpu/drm/rcar-du/rcar_du_kms.c | 36 +++++++++++++++++++++++++------- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 2 +- 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 38a8b52624ce..e113352bacdc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -21,6 +21,7 @@ #include #include +#include #include #include "rcar_du_crtc.h" @@ -34,6 +35,11 @@ static int rcar_du_unload(struct drm_device *dev) { + struct rcar_du_device *rcdu = dev->dev_private; + + if (rcdu->fbdev) + drm_fbdev_cma_fini(rcdu->fbdev); + drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); @@ -109,6 +115,13 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); } +static void rcar_du_lastclose(struct drm_device *dev) +{ + struct rcar_du_device *rcdu = dev->dev_private; + + drm_fbdev_cma_restore_mode(rcdu->fbdev); +} + static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) { struct rcar_du_device *rcdu = dev->dev_private; @@ -145,6 +158,7 @@ static struct drm_driver rcar_du_driver = { .load = rcar_du_load, .unload = rcar_du_unload, .preclose = rcar_du_preclose, + .lastclose = rcar_du_lastclose, .get_vblank_counter = drm_vblank_count, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 050d71c1f785..65d2d636b002 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -23,6 +23,7 @@ struct clk; struct device; struct drm_device; +struct drm_fbdev_cma; struct rcar_du_device; struct rcar_du_lvdsenc; @@ -66,6 +67,7 @@ struct rcar_du_device { void __iomem *mmio; struct drm_device *ddev; + struct drm_fbdev_cma *fbdev; struct rcar_du_crtc crtcs[3]; unsigned int num_crtcs; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index cc71b1a0c3ce..b31ac080c4a7 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -167,8 +167,16 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return drm_fb_cma_create(dev, file_priv, mode_cmd); } +static void rcar_du_output_poll_changed(struct drm_device *dev) +{ + struct rcar_du_device *rcdu = dev->dev_private; + + drm_fbdev_cma_hotplug_event(rcdu->fbdev); +} + static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { .fb_create = rcar_du_fb_create, + .output_poll_changed = rcar_du_output_poll_changed, }; int rcar_du_modeset_init(struct rcar_du_device *rcdu) @@ -179,17 +187,18 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) struct drm_device *dev = rcdu->ddev; struct drm_encoder *encoder; + struct drm_fbdev_cma *fbdev; unsigned int num_groups; unsigned int i; int ret; - drm_mode_config_init(rcdu->ddev); + drm_mode_config_init(dev); - rcdu->ddev->mode_config.min_width = 0; - rcdu->ddev->mode_config.min_height = 0; - rcdu->ddev->mode_config.max_width = 4095; - rcdu->ddev->mode_config.max_height = 2047; - rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; + 
dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 4095; + dev->mode_config.max_height = 2047; + dev->mode_config.funcs = &rcar_du_mode_config_funcs; rcdu->num_crtcs = rcdu->info->num_crtcs; @@ -262,9 +271,20 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) return ret; } - drm_kms_helper_poll_init(rcdu->ddev); + drm_kms_helper_poll_init(dev); + + drm_helper_disable_unused_functions(dev); + + fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc, + dev->mode_config.num_connector); + if (IS_ERR(fbdev)) + return PTR_ERR(fbdev); + +#ifndef CONFIG_FRAMEBUFFER_CONSOLE + drm_fbdev_cma_restore_mode(fbdev); +#endif - drm_helper_disable_unused_functions(rcdu->ddev); + rcdu->fbdev = fbdev; return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 36105db9bda1..41d563adfeaa 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -46,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector) static enum drm_connector_status rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) { - return connector_status_unknown; + return connector_status_connected; } static const struct drm_connector_funcs connector_funcs = { -- cgit v1.2.3 From c76ce038e31a2b30bc3dd816f0aefaf685097a0a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 8 Aug 2013 14:41:03 +0100 Subject: drm/i915: Update rules for reading cache lines through the LLC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LLC is a fun device. The cache is a distinct functional block within the SA that arbitrates access from both the CPU and GPU cores. As such all writes to memory land first in the LLC before further action is taken. For example, an uncached write from either the CPU or GPU will then proceed to memory and evict the cacheline from the LLC. This means that a read from the LLC always returns the correct information even if the PTE bit in the GPU differs from the PAT bit in the CPU. For the older snooping architecture on non-LLC, the fundamental principle still holds except that some coordination is required between the CPU and GPU to explicitly perform the snooping (which is handled by our request tracking). The upshot of this is that we know that we can issue a read from either LLC devices or snoopable memory and trust the contents of the cache - i.e. we can forgo a clflush before a read in these circumstances. Writing to memory from the CPU is a little more tricky as we have to consider that the scanout does not read from the CPU cache at all, but from main memory. So we have to currently treat all requests to write to uncached memory as having to be flushed to main memory for coherency with all consumers. 
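To make the read-side rule concrete: a clflush before a read can be skipped whenever the platform has an LLC or the object is mapped snooped, because either path guarantees the cache already holds what was last written. Below is a minimal standalone sketch of that predicate; the enum and helper names are illustrative stand-ins rather than i915 symbols, though the diff that follows adds a helper of the same shape (cpu_cache_is_coherent()).

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the platform and per-object cache state. */
    enum cache_level { CACHE_NONE, CACHE_SNOOPED };

    /* Reads are coherent if the LLC arbitrates all traffic, or if the
     * object is snooped so CPU and GPU explicitly keep each other honest. */
    static bool read_is_coherent(bool has_llc, enum cache_level level)
    {
            return has_llc || level != CACHE_NONE;
    }

    int main(void)
    {
            /* A clflush before the read is only needed when neither holds. */
            printf("uncached, no LLC -> clflush first: %d\n",
                   !read_is_coherent(false, CACHE_NONE));
            printf("uncached, LLC    -> clflush first: %d\n",
                   !read_is_coherent(true, CACHE_NONE));
            return 0;
    }
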
Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 79cef3c9b1ad..2a1c87141693 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -61,6 +61,12 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +static bool cpu_cache_is_coherent(struct drm_device *dev, + enum i915_cache_level level) +{ + return HAS_LLC(dev) || level != I915_CACHE_NONE; +} + static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) { if (obj->tiling_mode) @@ -420,8 +426,7 @@ i915_gem_shmem_pread(struct drm_device *dev, * read domain and manually flush cachelines (if required). This * optimizes for the case when the gpu will dirty the data * anyway again before the next pread happens. */ - if (obj->cache_level == I915_CACHE_NONE) - needs_clflush = 1; + needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) @@ -745,11 +750,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev, return ret; } } - /* Same trick applies for invalidate partially written cachelines before - * writing. */ - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) - && obj->cache_level == I915_CACHE_NONE) - needs_clflush_before = 1; + /* Same trick applies to invalidate partially written cachelines read + * before writing. */ + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + needs_clflush_before = + !cpu_cache_is_coherent(dev, obj->cache_level); ret = i915_gem_object_get_pages(obj); if (ret) @@ -3597,7 +3602,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) /* Flush the CPU cache if it's still invalid. */ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { - i915_gem_clflush_object(obj); + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + i915_gem_clflush_object(obj); obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } -- cgit v1.2.3 From cc98b413c197c4c6a62b1e469e9d05e613571af5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 9 Aug 2013 12:25:09 +0100 Subject: drm/i915: Track when an object is pinned for use by the display engine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The display engine has unique coherency rules such that it requires special handling to ensure that all writes to cursors, scanouts and sprites are clflushed. This patch introduces the infrastructure to simply track when an object is being accessed by the display engine. v2: Explain the is_pin_display() magic as the sources for obj->pin_count and their individual rules is not obvious. 
(Ville) Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 ++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 36 ++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1a87cc9fd899..eb87865c20d4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -117,6 +117,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (name: %d)", obj->base.name); if (obj->pin_count) seq_printf(m, " (pinned x %d)", obj->pin_count); + if (obj->pin_display) + seq_printf(m, " (display)"); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); list_for_each_entry(vma, &obj->vma_list, vma_link) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6abf8f9d9b14..b5df2308f8e6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1377,6 +1377,7 @@ struct drm_i915_gem_object { */ unsigned int fault_mappable:1; unsigned int pin_mappable:1; + unsigned int pin_display:1; /* * Is the GPU currently using a fence to access this buffer, @@ -1867,6 +1868,7 @@ int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2a1c87141693..b98c3b0e5a02 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3501,6 +3501,22 @@ unlock: return ret; } +static bool is_pin_display(struct drm_i915_gem_object *obj) +{ + /* There are 3 sources that pin objects: + * 1. The display engine (scanouts, sprites, cursors); + * 2. Reservations for execbuffer; + * 3. The user. + * + * We can ignore reservations as we hold the struct_mutex and + * are only called outside of the reservation path. The user + * can only increment pin_count once, and so if after + * subtracting the potential reference by the user, any pin_count + * remains, it must be due to another use by the display engine. + */ + return obj->pin_count - !!obj->user_pin_count; +} + /* * Prepare buffer for display plane (scanout, cursors, etc). * Can be called from an uninterruptible phase (modesetting) and allows @@ -3520,6 +3536,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, return ret; } + /* Mark the pin_display early so that we account for the + * display coherency whilst setting up the cache domains. + */ + obj->pin_display = true; + /* The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is * done with uncached PTEs. This is lowest common denominator for all @@ -3531,7 +3552,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, */ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); if (ret) - return ret; + goto err_unpin_display; /* As the user may map the buffer once pinned in the display plane * (e.g. 
libkms for the bootup splash), we have to ensure that we @@ -3539,7 +3560,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, */ ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); if (ret) - return ret; + goto err_unpin_display; i915_gem_object_flush_cpu_write_domain(obj); @@ -3557,6 +3578,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, old_write_domain); return 0; + +err_unpin_display: + obj->pin_display = is_pin_display(obj); + return ret; +} + +void +i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin(obj); + obj->pin_display = is_pin_display(obj); } int diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 370c902fa629..bafdc3e21e87 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1877,7 +1877,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); err_interruptible: dev_priv->mm.interruptible = true; return ret; @@ -1886,7 +1886,7 @@ err_interruptible: void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_fence(obj); - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); } /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel @@ -6759,7 +6759,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else - i915_gem_object_unpin(intel_crtc->cursor_bo); + i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } @@ -6774,7 +6774,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: -- cgit v1.2.3 From 2c22569bba8af6c2976d5f9479fe54a53a39966b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 9 Aug 2013 12:26:45 +0100 Subject: drm/i915: Update rules for writing through the LLC with the cpu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As mentioned in the previous commit, reads and writes from both the CPU and GPU go through the LLC. This gives us coherency between the CPU and GPU irrespective of the attribute settings either device sets. We can use this to avoid having to clflush even uncached memory. Except for the scanout. The scanout resides within another functional block that does not use the LLC but reads directly from main memory. So in order to maintain coherency with the scanout, writes to uncached memory must be flushed. In order to optimize writes elsewhere, we start tracking whether a framebuffer is attached to an object. v2: Use pin_display tracking rather than fb_count (to ensure we flush cursors as well etc) and only force the clflush along explicit writes to the scanout paths (i.e. pin_to_display_plane and pwrite into scanout). v3: Force the flush after hitting the slowpath in pwrite, as after dropping the lock the object's cache domain may be invalidated. (Ville) Based on a patch by Ville Syrjälä.
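The write-side rule can be sketched the same way: with an LLC or snooping the CPU write is coherent and needs no flush, except when the object is currently feeding the display engine, which bypasses the cache and reads main memory directly. The struct below is an illustrative stand-in, not the i915 object; the real driver derives the same answer from obj->cache_level plus the new pin_display flag via cpu_write_needs_clflush(), as the diff shows.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative per-object state, not the i915 structures. */
    struct obj_state {
            bool has_llc;         /* platform routes traffic through an LLC  */
            bool snooped;         /* object mapped cacheable/snooped         */
            bool pinned_display;  /* currently a scanout, cursor or sprite   */
    };

    /* CPU writes normally ride the cache, but the display engine reads
     * main memory directly, so scanout buffers must still be flushed. */
    static bool write_needs_clflush(const struct obj_state *o)
    {
            if (!o->has_llc && !o->snooped)
                    return true;             /* no coherency path at all */
            return o->pinned_display;        /* coherent, except scanout */
    }

    int main(void)
    {
            struct obj_state scanout = { .has_llc = true, .pinned_display = true };
            struct obj_state plain_bo = { .has_llc = true };

            printf("scanout needs flush:  %d\n", write_needs_clflush(&scanout));
            printf("plain bo needs flush: %d\n", write_needs_clflush(&plain_bo));
            return 0;
    }
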
Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 58 ++++++++++++++++-------------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- 4 files changed, 35 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b5df2308f8e6..34d3f2fae8ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1841,7 +1841,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error) } void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b98c3b0e5a02..54d76e9392d8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -37,7 +37,8 @@ #include static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); -static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, + bool force); static __must_check int i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -67,6 +68,14 @@ static bool cpu_cache_is_coherent(struct drm_device *dev, return HAS_LLC(dev) || level != I915_CACHE_NONE; } +static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + return true; + + return obj->pin_display; +} + static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) { if (obj->tiling_mode) @@ -742,8 +751,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * write domain and manually flush cachelines (if required). This * optimizes for the case when the gpu will use the data * right away and we therefore have to clflush anyway. */ - if (obj->cache_level == I915_CACHE_NONE) - needs_clflush_after = 1; + needs_clflush_after = cpu_write_needs_clflush(obj); if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) @@ -833,7 +841,7 @@ out: */ if (!needs_clflush_after && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, obj->pin_display); i915_gem_chipset_flush(dev); } } @@ -911,9 +919,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - if (obj->cache_level == I915_CACHE_NONE && - obj->tiling_mode == I915_TILING_NONE && - obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + if (obj->tiling_mode == I915_TILING_NONE && + obj->base.write_domain != I915_GEM_DOMAIN_CPU && + cpu_write_needs_clflush(obj)) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. 
gtt mappings when moving data between @@ -1262,8 +1270,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, } /* Pinned buffers may be scanout, so flush the cache */ - if (obj->pin_count) - i915_gem_object_flush_cpu_write_domain(obj); + if (obj->pin_display) + i915_gem_object_flush_cpu_write_domain(obj, true); drm_gem_object_unreference(&obj->base); unlock: @@ -1640,7 +1648,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) * hope for the best. */ WARN_ON(ret != -EIO); - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, true); obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } @@ -3217,7 +3225,8 @@ err_unpin: } void -i915_gem_clflush_object(struct drm_i915_gem_object *obj) +i915_gem_clflush_object(struct drm_i915_gem_object *obj, + bool force) { /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen @@ -3241,7 +3250,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) * snooping behaviour occurs naturally as the result of our domain * tracking. */ - if (obj->cache_level != I915_CACHE_NONE) + if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) return; trace_i915_gem_object_clflush(obj); @@ -3278,14 +3287,15 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) /** Flushes the CPU write domain for the object if it's dirty. */ static void -i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, + bool force) { uint32_t old_write_domain; if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, force); i915_gem_chipset_flush(obj->base.dev); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3319,7 +3329,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - i915_gem_object_flush_cpu_write_domain(obj); + i915_gem_object_flush_cpu_write_domain(obj, false); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -3409,7 +3419,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, obj, cache_level); } - if (cache_level == I915_CACHE_NONE) { + list_for_each_entry(vma, &obj->vma_list, vma_link) + vma->node.color = cache_level; + obj->cache_level = cache_level; + + if (cpu_write_needs_clflush(obj)) { u32 old_read_domains, old_write_domain; /* If we're coming from LLC cached, then we haven't @@ -3432,9 +3446,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, old_write_domain); } - list_for_each_entry(vma, &obj->vma_list, vma_link) - vma->node.color = cache_level; - obj->cache_level = cache_level; i915_gem_verify_gtt(dev); return 0; } @@ -3562,7 +3573,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (ret) goto err_unpin_display; - i915_gem_object_flush_cpu_write_domain(obj); + i915_gem_object_flush_cpu_write_domain(obj, true); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; @@ -3634,8 +3645,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) /* Flush the CPU cache if it's still invalid. 
*/ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { - if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, false); obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } @@ -3817,10 +3827,6 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj->user_pin_count++; obj->pin_filp = file; - /* XXX - flush the CPU caches for pinned objects - * as the X server doesn't manage domains yet - */ - i915_gem_object_flush_cpu_write_domain(obj); args->offset = i915_gem_obj_ggtt_offset(obj); out: drm_gem_object_unreference(&obj->base); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8ccc29ac9629..e999578a021c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -716,7 +716,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, false); flush_domains |= obj->base.write_domain; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 24fb989593f0..c9420c280cf0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -487,7 +487,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) dev_priv->gtt.base.total / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, obj->pin_display); i915_gem_gtt_bind_object(obj, obj->cache_level); } -- cgit v1.2.3 From d46f1c3f1372e3a72fab97c60480aa4a1084387f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 8 Aug 2013 14:41:06 +0100 Subject: drm/i915: Allow the GPU to cache stolen memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a corollary to reviewing the interaction between LLC and our cache domains, the GPU PTE bits are independent of the CPU PAT bits. As such we can set the cache level on stolen memory based on how we wish the GPU to cache accesses to it. So we are free to set the same default cache levels as for normal bo, i.e. enable LLC cacheing by default where appropriate. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index e68c4b5da46d..e20d64966c72 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -287,9 +287,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev, i915_gem_object_pin_pages(obj); obj->stolen = stolen; - obj->base.write_domain = I915_GEM_DOMAIN_GTT; - obj->base.read_domains = I915_GEM_DOMAIN_GTT; - obj->cache_level = I915_CACHE_NONE; + obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; + obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE; return obj; -- cgit v1.2.3 From 28a8194c12f8c8bb46aecd4cb1f36bac716714c4 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Wed, 14 Aug 2013 19:14:17 +0200 Subject: drm/gma500/cdv: Add and hook up chip op for watermarks Add a callback hook to the chip ops struct to allow chips to have their specific fifo watermark update function. 
Currently only cdv actually tries to set wms based on crtc configuration but if/when the other chips needs it we can attach a callback for them as well. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_device.c | 1 + drivers/gpu/drm/gma500/cdv_device.h | 1 + drivers/gpu/drm/gma500/cdv_intel_display.c | 2 +- drivers/gpu/drm/gma500/gma_display.c | 2 +- drivers/gpu/drm/gma500/gma_display.h | 2 -- drivers/gpu/drm/gma500/psb_drv.h | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index daa45b5b3a45..747d903fe49f 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -656,4 +656,5 @@ const struct psb_ops cdv_chip_ops = { .restore_regs = cdv_restore_display_registers, .power_down = cdv_power_down, .power_up = cdv_power_up, + .update_wm = cdv_update_wm, }; diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 602406bb6d0f..9e4805627450 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h @@ -26,3 +26,4 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device * int reg); extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); +extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index ee8a502348ce..feacbbaabf14 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -532,7 +532,7 @@ void cdv_intel_disable_self_refresh(struct drm_device *dev) } -void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc) +void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc) { if (cdv_intel_single_pipe_active(dev)) { diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index cd253caf5672..fe972c930c2c 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -321,7 +321,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) } if (IS_CDV(dev)) - cdv_intel_update_watermark(dev, crtc); + dev_priv->ops->update_wm(dev, crtc); /* Set FIFO watermarks */ REG_WRITE(DSPARB, 0x3F3E); diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 1044c165c714..9a4e8941bd25 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -103,6 +103,4 @@ extern bool gma_find_best_pll(const struct gma_limit_t *limit, /* Cedarview specific functions */ extern void cdv_intel_disable_self_refresh(struct drm_device *dev); -extern void cdv_intel_update_watermark(struct drm_device *dev, - struct drm_crtc *crtc); #endif diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index ed1e567b7e3b..effd69502be5 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -695,6 +695,7 @@ struct psb_ops { int (*restore_regs)(struct drm_device *dev); int (*power_up)(struct drm_device *dev); int (*power_down)(struct drm_device *dev); + void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc); void (*lvds_bl_power)(struct drm_device *dev, bool on); #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -- cgit v1.2.3 From 75346fe9bc4c9b366c760200a665a2c55b789389 Mon Sep 17 00:00:00 2001 From: Patrik Jakobsson Date: Thu, 15 Aug 2013 00:54:44 +0200 Subject: drm/gma500/cdv: Add 
and hook up chip op for disabling sr Add a callback hook to the chip ops struct to allow chips to have their specific self-refresh function. Currently only used by cdv. Signed-off-by: Patrik Jakobsson --- drivers/gpu/drm/gma500/cdv_device.c | 1 + drivers/gpu/drm/gma500/cdv_device.h | 1 + drivers/gpu/drm/gma500/cdv_intel_display.c | 6 +++--- drivers/gpu/drm/gma500/gma_display.c | 2 +- drivers/gpu/drm/gma500/gma_display.h | 3 --- drivers/gpu/drm/gma500/psb_drv.h | 1 + 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 747d903fe49f..162f686c532d 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -657,4 +657,5 @@ const struct psb_ops cdv_chip_ops = { .power_down = cdv_power_down, .power_up = cdv_power_up, .update_wm = cdv_update_wm, + .disable_sr = cdv_disable_sr, }; diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 9e4805627450..705c11d47d45 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h @@ -27,3 +27,4 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device * extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc); +extern void cdv_disable_sr(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index feacbbaabf14..8fbfa06da62d 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -511,7 +511,7 @@ static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) return false; } -void cdv_intel_disable_self_refresh(struct drm_device *dev) +void cdv_disable_sr(struct drm_device *dev) { if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { @@ -534,6 +534,7 @@ void cdv_intel_disable_self_refresh(struct drm_device *dev) void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc) { + struct drm_psb_private *dev_priv = dev->dev_private; if (cdv_intel_single_pipe_active(dev)) { u32 fw; @@ -587,8 +588,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc) gma_wait_for_vblank(dev); - cdv_intel_disable_self_refresh(dev); - + dev_priv->ops->disable_sr(dev); } } diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index fe972c930c2c..24e8af3d22bf 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -211,7 +211,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) */ if (IS_CDV(dev)) - cdv_intel_disable_self_refresh(dev); + dev_priv->ops->disable_sr(dev); switch (mode) { case DRM_MODE_DPMS_ON: diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 9a4e8941bd25..78b9f986a6e5 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -100,7 +100,4 @@ extern bool gma_pll_is_valid(struct drm_crtc *crtc, extern bool gma_find_best_pll(const struct gma_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, struct gma_clock_t *best_clock); - -/* Cedarview specific functions */ -extern void cdv_intel_disable_self_refresh(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index effd69502be5..b7221f3c40b9 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ 
b/drivers/gpu/drm/gma500/psb_drv.h @@ -696,6 +696,7 @@ struct psb_ops { int (*power_up)(struct drm_device *dev); int (*power_down)(struct drm_device *dev); void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc); + void (*disable_sr)(struct drm_device *dev); void (*lvds_bl_power)(struct drm_device *dev, bool on); #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -- cgit v1.2.3 From cc6a36f2fe06b28c7ea049e6c45622c98358bc00 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sun, 18 Aug 2013 12:54:25 +0200 Subject: drm: DRM should depend on HAS_DMA If NO_DMA=y: drivers/built-in.o: In function `__drm_pci_free': drivers/gpu/drm/drm_pci.c:112: undefined reference to `dma_free_coherent' drivers/built-in.o: In function `drm_pci_alloc': drivers/gpu/drm/drm_pci.c:72: undefined reference to `dma_alloc_coherent' drivers/built-in.o: In function `drm_gem_unmap_dma_buf': drivers/gpu/drm/drm_prime.c:87: undefined reference to `dma_unmap_sg' drivers/built-in.o: In function `drm_gem_map_dma_buf': drivers/gpu/drm/drm_prime.c:78: undefined reference to `dma_map_sg' Signed-off-by: Geert Uytterhoeven Signed-off-by: Dave Airlie --- drivers/gpu/drm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a7c54c843291..626bc0cb1046 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -6,7 +6,7 @@ # menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" - depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU + depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA select HDMI select I2C select I2C_ALGOBIT -- cgit v1.2.3 From 063b472fbb44ac562797a630ac3516720f588140 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 14 Aug 2013 21:43:26 +0200 Subject: drm/i2c: tda998x: fix EDID reading on TDA19988 devices TDA19988 devices need their RAM enabled in order to read EDID information. Add support for this. 
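The shape of the fix is a simple bracket around the EDID fetch: clear the RAM power-down bit before the block read and set it again afterwards, on the error path too. A compact standalone illustration follows; the register helpers and the shadow variable are stand-ins for the driver's reg_clear()/reg_set(), and only the TX4_PD_RAM bit value is taken from the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define TX4_PD_RAM  (1 << 1)    /* EDID RAM power-down bit, as in the patch */

    static uint8_t tx4_shadow = TX4_PD_RAM;   /* stand-in register shadow */

    static void reg_clear_bits(uint8_t bits) { tx4_shadow &= (uint8_t)~bits; }
    static void reg_set_bits(uint8_t bits)   { tx4_shadow |= bits; }

    /* Stand-in for the DDC block transfer. */
    static bool read_edid_block(uint8_t *buf, int blk)
    {
            memset(buf, 0, 128);
            (void)blk;
            return true;
    }

    /* Power the EDID RAM up only for the duration of the read. */
    static bool fetch_edid_block(uint8_t *buf, int blk, bool is_tda19988)
    {
            bool ok;

            if (is_tda19988)
                    reg_clear_bits(TX4_PD_RAM);
            ok = read_edid_block(buf, blk);
            if (is_tda19988)
                    reg_set_bits(TX4_PD_RAM);
            return ok;
    }

    int main(void)
    {
            uint8_t block[128];

            return fetch_edid_block(block, 0, true) ? 0 : 1;
    }
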
Signed-off-by: Russell King Signed-off-by: Rob Clark Tested-by: Darren Etheridge Tested-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index e68b58a1aaf9..d71c408916e4 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -229,6 +229,8 @@ struct tda998x_priv { /* Page 12h: HDCP and OTP */ #define REG_TX3 REG(0x12, 0x9a) /* read/write */ +#define REG_TX4 REG(0x12, 0x9b) /* read/write */ +# define TX4_PD_RAM (1 << 1) #define REG_TX33 REG(0x12, 0xb8) /* read/write */ # define TX33_HDMI (1 << 1) @@ -673,6 +675,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) static uint8_t * do_get_edid(struct drm_encoder *encoder) { + struct tda998x_priv *priv = to_tda998x_priv(encoder); int j = 0, valid_extensions = 0; uint8_t *block, *new; bool print_bad_edid = drm_debug & DRM_UT_KMS; @@ -680,6 +683,9 @@ do_get_edid(struct drm_encoder *encoder) if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; + if (priv->rev == TDA19988) + reg_clear(encoder, REG_TX4, TX4_PD_RAM); + /* base block fetch */ if (read_edid_block(encoder, block, 0)) goto fail; @@ -689,7 +695,7 @@ do_get_edid(struct drm_encoder *encoder) /* if there's no extensions, we're done */ if (block[0x7e] == 0) - return block; + goto done; new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) @@ -716,9 +722,15 @@ do_get_edid(struct drm_encoder *encoder) block = new; } +done: + if (priv->rev == TDA19988) + reg_set(encoder, REG_TX4, TX4_PD_RAM); + return block; fail: + if (priv->rev == TDA19988) + reg_set(encoder, REG_TX4, TX4_PD_RAM); dev_warn(encoder->dev->dev, "failed to read EDID\n"); kfree(block); return NULL; -- cgit v1.2.3 From bcb2481df01a9aee7a09b20d43194011edd35754 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 14 Aug 2013 21:43:27 +0200 Subject: drm/i2c: tda998x: ensure VIP output mux is properly set When switching between various drivers for this device, it's possible that some critical registers are left containing values which affect the device operation. One such case encountered is the VIP output mux register. This defaults to 0x24 on powerup, but other drivers may set this to 0x12. This results in incorrect colours. Fix this by ensuring that the register is always set to the power on default setting. 
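The broader idiom is worth spelling out: a register another driver may have touched cannot be assumed to still hold its power-on value, so any register this driver depends on is rewritten to a known default in the reset path. A trivial sketch of that idiom follows; the write helper is hypothetical, standing in for the driver's reg_write(), while the offset (page 0x00, address 0x27) and the default 0x24 come from the patch.

    #include <stdint.h>

    /* Page 0x00, offset 0x27 and its power-on default, both from the patch. */
    #define REG_MUX_VP_VIP_OUT  0x27
    #define MUX_VP_VIP_OUT_POR  0x24

    /* Hypothetical low-level write standing in for the driver's reg_write(). */
    static void reg_write_stub(uint8_t page, uint8_t addr, uint8_t val)
    {
            (void)page; (void)addr; (void)val;   /* bus access elided */
    }

    /* Reset path: force every register we rely on back to a known value
     * instead of trusting whatever a previous driver left behind. */
    static void chip_reset_defaults(void)
    {
            reg_write_stub(0x00, REG_MUX_VP_VIP_OUT, MUX_VP_VIP_OUT_POR);
    }

    int main(void)
    {
            chip_reset_defaults();
            return 0;
    }
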
Signed-off-by: Russell King Tested-by: Darren Etheridge Tested-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d71c408916e4..cb9b13ad1ae6 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -110,6 +110,7 @@ struct tda998x_priv { #define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */ # define VIP_CNTRL_5_CKCASE (1 << 0) # define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) +#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */ #define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ # define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) # define MAT_CONTRL_MAT_BP (1 << 2) @@ -415,6 +416,9 @@ tda998x_reset(struct drm_encoder *encoder) reg_write(encoder, REG_PLL_SCGR1, 0x5b); reg_write(encoder, REG_PLL_SCGR2, 0x00); reg_write(encoder, REG_PLL_SCG2, 0x10); + + /* Write the default value MUX register */ + reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24); } /* DRM encoder functions */ -- cgit v1.2.3 From 20c17675fea9ddf94b8249a67fb95fd26e732baf Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 14 Aug 2013 21:43:28 +0200 Subject: drm/i2c: tda998x: fix npix/nline programming The npix/nline registers are supposed to be programmed with the total number of pixels/lines, not the displayed pixels/lines, and not minus one either. Signed-off-by: Russell King Tested-by: Darren Etheridge Tested-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index cb9b13ad1ae6..a701411fcc0d 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -587,8 +587,8 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); reg_write(encoder, REG_VIDFORMAT, 0x00); - reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1); - reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1); + reg_write16(encoder, REG_NPIX_MSB, mode->htotal); + reg_write16(encoder, REG_NLINE_MSB, mode->vtotal); reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start); reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end); reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start); -- cgit v1.2.3 From 5e74c22cd1e0f9e49573fe580de47e198ee04975 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 14 Aug 2013 21:43:29 +0200 Subject: drm/i2c: tda998x: prepare for video input configuration The video-input-port (VIP) is highly configurable. This prepares current driver to allow to configure VIP configuration, as some boards connect lcd controller and TDA998x "pin-swapped" and depend on VIP to swap the pins by register configuration. 
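Concretely, preparing for configuration means keeping the three VIP control bytes in driver-private data instead of hardcoding them at DPMS time, so board code can later fill them in. The sketch below shows the idea of folding per-board swap and mirror choices into stored control bytes; the bit packing used here is purely illustrative and does not match the TDA998x register layout, and only the default port order 2,3,0,1,4,5 is taken from the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative packing only: 3-bit port-swap select plus a mirror flag
     * per input group; the real layout lives in the VIP_CNTRL_* macros. */
    #define SWAP_SEL(x)  ((uint8_t)((x) & 7))
    #define MIRROR_FLAG  (1u << 3)

    struct board_vip_config {
            uint8_t swap[6];     /* which port feeds input groups A..F */
            bool mirror[6];      /* per-group bit mirroring            */
    };

    struct encoder_priv {
            uint8_t vip_cntrl[3];   /* cached, written out at DPMS-on time */
    };

    /* set_config-style hook: fold the board wiring into the stored bytes,
     * two input groups per control register. */
    static void encoder_set_config(struct encoder_priv *priv,
                                   const struct board_vip_config *cfg)
    {
            for (int i = 0; i < 3; i++) {
                    uint8_t lo = SWAP_SEL(cfg->swap[2 * i]) |
                                 (cfg->mirror[2 * i] ? MIRROR_FLAG : 0);
                    uint8_t hi = SWAP_SEL(cfg->swap[2 * i + 1]) |
                                 (cfg->mirror[2 * i + 1] ? MIRROR_FLAG : 0);

                    priv->vip_cntrl[i] = (uint8_t)(lo | (hi << 4));
            }
    }

    int main(void)
    {
            /* Default wiring from the patch: groups A..F fed by ports 2,3,0,1,4,5. */
            struct board_vip_config cfg = { .swap = { 2, 3, 0, 1, 4, 5 } };
            struct encoder_priv priv;

            encoder_set_config(&priv, &cfg);
            printf("vip_cntrl 0..2: %02x %02x %02x\n",
                   priv.vip_cntrl[0], priv.vip_cntrl[1], priv.vip_cntrl[2]);
            return 0;
    }
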
Signed-off-by: Russell King Tested-by: Darren Etheridge Tested-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index a701411fcc0d..527d11b5ff14 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -32,6 +32,9 @@ struct tda998x_priv { uint16_t rev; uint8_t current_page; int dpms; + u8 vip_cntrl_0; + u8 vip_cntrl_1; + u8 vip_cntrl_2; }; #define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) @@ -448,12 +451,9 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) reg_write(encoder, REG_ENA_VP_1, 0xff); reg_write(encoder, REG_ENA_VP_2, 0xff); /* set muxing after enabling ports: */ - reg_write(encoder, REG_VIP_CNTRL_0, - VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3)); - reg_write(encoder, REG_VIP_CNTRL_1, - VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1)); - reg_write(encoder, REG_VIP_CNTRL_2, - VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5)); + reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0); + reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1); + reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); break; case DRM_MODE_DPMS_OFF: /* disable audio and video ports */ @@ -823,6 +823,10 @@ tda998x_encoder_init(struct i2c_client *client, if (!priv) return -ENOMEM; + priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); + priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); + priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); + priv->current_page = 0; priv->cec = i2c_new_dummy(client->adapter, 0x34); priv->dpms = DRM_MODE_DPMS_OFF; -- cgit v1.2.3 From c4c11dd160a8cc98f402c4e12f94b1572e822ffd Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 14 Aug 2013 21:43:30 +0200 Subject: drm/i2c: tda998x: add video and audio input configuration This patch adds tda998x specific parameters to allow it to be configured for different boards using it. Also, this implements rudimentary audio support for S/PDIF attached controllers. Signed-off-by: Russell King Signed-off-by: Sebastian Hesselbarth Tested-by: Darren Etheridge Tested-by: Russell King Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 268 ++++++++++++++++++++++++++++++++++++-- include/drm/i2c/tda998x.h | 30 +++++ 2 files changed, 290 insertions(+), 8 deletions(-) create mode 100644 include/drm/i2c/tda998x.h diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 527d11b5ff14..2b64dfa60205 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -23,7 +23,7 @@ #include #include #include - +#include #define DBG(fmt, ...) 
DRM_DEBUG(fmt"\n", ##__VA_ARGS__) @@ -32,9 +32,11 @@ struct tda998x_priv { uint16_t rev; uint8_t current_page; int dpms; + bool is_hdmi_sink; u8 vip_cntrl_0; u8 vip_cntrl_1; u8 vip_cntrl_2; + struct tda998x_encoder_params params; }; #define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) @@ -71,10 +73,13 @@ struct tda998x_priv { # define I2C_MASTER_DIS_MM (1 << 0) # define I2C_MASTER_DIS_FILT (1 << 1) # define I2C_MASTER_APP_STRT_LAT (1 << 2) +#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */ +# define FEAT_POWERDOWN_SPDIF (1 << 3) #define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */ #define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */ #define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */ # define INT_FLAGS_2_EDID_BLK_RD (1 << 1) +#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */ #define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */ #define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */ #define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */ @@ -113,6 +118,7 @@ struct tda998x_priv { #define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */ # define VIP_CNTRL_5_CKCASE (1 << 0) # define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) +#define REG_MUX_AP REG(0x00, 0x26) /* read/write */ #define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */ #define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ # define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) @@ -175,6 +181,12 @@ struct tda998x_priv { # define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4) # define HVF_CNTRL_1_SEMI_PLANAR (1 << 6) #define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */ +#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */ +# define I2S_FORMAT(x) (((x) & 3) << 0) +#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */ +# define AIP_CLKSEL_FS(x) (((x) & 3) << 0) +# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2) +# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3) /* Page 02h: PLL settings */ @@ -198,6 +210,12 @@ struct tda998x_priv { #define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */ #define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */ #define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */ +# define AUDIO_DIV_SERCLK_1 0 +# define AUDIO_DIV_SERCLK_2 1 +# define AUDIO_DIV_SERCLK_4 2 +# define AUDIO_DIV_SERCLK_8 3 +# define AUDIO_DIV_SERCLK_16 4 +# define AUDIO_DIV_SERCLK_32 5 #define REG_SEL_CLK REG(0x02, 0x11) /* read/write */ # define SEL_CLK_SEL_CLK1 (1 << 0) # define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1) @@ -216,6 +234,11 @@ struct tda998x_priv { /* Page 10h: information frames and packets */ +#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */ +#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */ +#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */ +#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */ +#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */ /* Page 11h: audio settings and content info packets */ @@ -225,10 +248,33 @@ struct tda998x_priv { # define AIP_CNTRL_0_LAYOUT (1 << 2) # define AIP_CNTRL_0_ACR_MAN (1 << 5) # define AIP_CNTRL_0_RST_CTS (1 << 6) +#define REG_CA_I2S REG(0x11, 0x01) /* read/write */ +# define CA_I2S_CA_I2S(x) (((x) & 31) << 0) +# define CA_I2S_HBR_CHSTAT (1 << 6) +#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */ +#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */ +#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */ +#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */ +#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */ +#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */ +#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */ +#define REG_CTS_N REG(0x11, 
0x0c) /* read/write */ +# define CTS_N_K(x) (((x) & 7) << 0) +# define CTS_N_M(x) (((x) & 3) << 4) #define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */ # define ENC_CNTRL_RST_ENC (1 << 0) # define ENC_CNTRL_RST_SEL (1 << 1) # define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2) +#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */ +# define DIP_FLAGS_ACR (1 << 0) +# define DIP_FLAGS_GC (1 << 1) +#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */ +# define DIP_IF_FLAGS_IF1 (1 << 1) +# define DIP_IF_FLAGS_IF2 (1 << 2) +# define DIP_IF_FLAGS_IF3 (1 << 3) +# define DIP_IF_FLAGS_IF4 (1 << 4) +# define DIP_IF_FLAGS_IF5 (1 << 5) +#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */ /* Page 12h: HDCP and OTP */ @@ -344,6 +390,23 @@ fail: return ret; } +static void +reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + uint8_t buf[cnt+1]; + int ret; + + buf[0] = REG2ADDR(reg); + memcpy(&buf[1], p, cnt); + + set_page(encoder, reg); + + ret = i2c_master_send(client, buf, cnt + 1); + if (ret < 0) + dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +} + static uint8_t reg_read(struct drm_encoder *encoder, uint16_t reg) { @@ -412,7 +475,7 @@ tda998x_reset(struct drm_encoder *encoder) reg_write(encoder, REG_SERIALIZER, 0x00); reg_write(encoder, REG_BUFFER_OUT, 0x00); reg_write(encoder, REG_PLL_SCG1, 0x00); - reg_write(encoder, REG_AUDIO_DIV, 0x03); + reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8); reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); reg_write(encoder, REG_PLL_SCGN1, 0xfa); reg_write(encoder, REG_PLL_SCGN2, 0x00); @@ -424,11 +487,184 @@ tda998x_reset(struct drm_encoder *encoder) reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24); } +static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) +{ + uint8_t sum = 0; + + while (bytes--) + sum += *buf++; + return (255 - sum) + 1; +} + +#define HB(x) (x) +#define PB(x) (HB(2) + 1 + (x)) + +static void +tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr, + uint8_t *buf, size_t size) +{ + buf[PB(0)] = tda998x_cksum(buf, size); + + reg_clear(encoder, REG_DIP_IF_FLAGS, bit); + reg_write_range(encoder, addr, buf, size); + reg_set(encoder, REG_DIP_IF_FLAGS, bit); +} + +static void +tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) +{ + uint8_t buf[PB(5) + 1]; + + buf[HB(0)] = 0x84; + buf[HB(1)] = 0x01; + buf[HB(2)] = 10; + buf[PB(0)] = 0; + buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ + buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ + buf[PB(4)] = p->audio_frame[4]; + buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ + + tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, + sizeof(buf)); +} + +static void +tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + uint8_t buf[PB(13) + 1]; + + memset(buf, 0, sizeof(buf)); + buf[HB(0)] = 0x82; + buf[HB(1)] = 0x02; + buf[HB(2)] = 13; + buf[PB(4)] = drm_match_cea_mode(mode); + + tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, + sizeof(buf)); +} + +static void tda998x_audio_mute(struct drm_encoder *encoder, bool on) +{ + if (on) { + reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO); + reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO); + reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); + } else { + reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); + } +} + +static void +tda998x_configure_audio(struct drm_encoder *encoder, + struct 
drm_display_mode *mode, struct tda998x_encoder_params *p) +{ + uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv; + uint32_t n; + + /* Enable audio ports */ + reg_write(encoder, REG_ENA_AP, p->audio_cfg); + reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg); + + /* Set audio input source */ + switch (p->audio_format) { + case AFMT_SPDIF: + reg_write(encoder, REG_MUX_AP, 0x40); + clksel_aip = AIP_CLKSEL_AIP(0); + /* FS64SPDIF */ + clksel_fs = AIP_CLKSEL_FS(2); + cts_n = CTS_N_M(3) | CTS_N_K(3); + ca_i2s = 0; + break; + + case AFMT_I2S: + reg_write(encoder, REG_MUX_AP, 0x64); + clksel_aip = AIP_CLKSEL_AIP(1); + /* ACLK */ + clksel_fs = AIP_CLKSEL_FS(0); + cts_n = CTS_N_M(3) | CTS_N_K(3); + ca_i2s = CA_I2S_CA_I2S(0); + break; + } + + reg_write(encoder, REG_AIP_CLKSEL, clksel_aip); + reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT); + + /* Enable automatic CTS generation */ + reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN); + reg_write(encoder, REG_CTS_N, cts_n); + + /* + * Audio input somehow depends on HDMI line rate which is + * related to pixclk. Testing showed that modes with pixclk + * >100MHz need a larger divider while <40MHz need the default. + * There is no detailed info in the datasheet, so we just + * assume 100MHz requires larger divider. + */ + if (mode->clock > 100000) + adiv = AUDIO_DIV_SERCLK_16; + else + adiv = AUDIO_DIV_SERCLK_8; + reg_write(encoder, REG_AUDIO_DIV, adiv); + + /* + * This is the approximate value of N, which happens to be + * the recommended values for non-coherent clocks. + */ + n = 128 * p->audio_sample_rate / 1000; + + /* Write the CTS and N values */ + buf[0] = 0x44; + buf[1] = 0x42; + buf[2] = 0x01; + buf[3] = n; + buf[4] = n >> 8; + buf[5] = n >> 16; + reg_write_range(encoder, REG_ACR_CTS_0, buf, 6); + + /* Set CTS clock reference */ + reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs); + + /* Reset CTS generator */ + reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); + reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); + + /* Write the channel status */ + buf[0] = 0x04; + buf[1] = 0x00; + buf[2] = 0x00; + buf[3] = 0xf1; + reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4); + + tda998x_audio_mute(encoder, true); + mdelay(20); + tda998x_audio_mute(encoder, false); + + /* Write the audio information packet */ + tda998x_write_aif(encoder, p); +} + /* DRM encoder functions */ static void tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) { + struct tda998x_priv *priv = to_tda998x_priv(encoder); + struct tda998x_encoder_params *p = params; + + priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) | + (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) | + VIP_CNTRL_0_SWAP_B(p->swap_b) | + (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0); + priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) | + (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) | + VIP_CNTRL_1_SWAP_D(p->swap_d) | + (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0); + priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) | + (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) | + VIP_CNTRL_2_SWAP_F(p->swap_f) | + (p->mirr_f ? 
VIP_CNTRL_2_MIRR_F : 0); + + priv->params = *p; } static void @@ -445,8 +681,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - /* enable audio and video ports */ - reg_write(encoder, REG_ENA_AP, 0xff); + /* enable video ports, audio will be enabled later */ reg_write(encoder, REG_ENA_VP_0, 0xff); reg_write(encoder, REG_ENA_VP_1, 0xff); reg_write(encoder, REG_ENA_VP_2, 0xff); @@ -608,17 +843,32 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, reg_write16(encoder, REG_REFPIX_MSB, ref_pix); reg_write16(encoder, REG_REFLINE_MSB, ref_line); - reg = TBG_CNTRL_1_VHX_EXT_DE | - TBG_CNTRL_1_VHX_EXT_HS | - TBG_CNTRL_1_VHX_EXT_VS | - TBG_CNTRL_1_DWIN_DIS | /* HDCP off */ + reg = TBG_CNTRL_1_DWIN_DIS | /* HDCP off */ TBG_CNTRL_1_VH_TGL_2; + /* + * It is questionable whether this is correct - the nxp driver + * does not set VH_TGL_2 and the below for all display modes. + */ if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC)) reg |= TBG_CNTRL_1_VH_TGL_0; reg_set(encoder, REG_TBG_CNTRL_1, reg); /* must be last register set: */ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); + + /* Only setup the info frames if the sink is HDMI */ + if (priv->is_hdmi_sink) { + /* We need to turn HDMI HDCP stuff on to get audio through */ + reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS); + reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1)); + reg_set(encoder, REG_TX33, TX33_HDMI); + + tda998x_write_avi(encoder, adjusted_mode); + + if (priv->params.audio_cfg) + tda998x_configure_audio(encoder, adjusted_mode, + &priv->params); + } } static enum drm_connector_status @@ -744,12 +994,14 @@ static int tda998x_encoder_get_modes(struct drm_encoder *encoder, struct drm_connector *connector) { + struct tda998x_priv *priv = to_tda998x_priv(encoder); struct edid *edid = (struct edid *)do_get_edid(encoder); int n = 0; if (edid) { drm_mode_connector_update_edid_property(connector, edid); n = drm_add_edid_modes(connector, edid); + priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid); kfree(edid); } diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h new file mode 100644 index 000000000000..3e419d92cf5a --- /dev/null +++ b/include/drm/i2c/tda998x.h @@ -0,0 +1,30 @@ +#ifndef __DRM_I2C_TDA998X_H__ +#define __DRM_I2C_TDA998X_H__ + +struct tda998x_encoder_params { + u8 swap_b:3; + u8 mirr_b:1; + u8 swap_a:3; + u8 mirr_a:1; + u8 swap_d:3; + u8 mirr_d:1; + u8 swap_c:3; + u8 mirr_c:1; + u8 swap_f:3; + u8 mirr_f:1; + u8 swap_e:3; + u8 mirr_e:1; + + u8 audio_cfg; + u8 audio_clk_cfg; + u8 audio_frame[6]; + + enum { + AFMT_SPDIF, + AFMT_I2S + } audio_format; + + unsigned audio_sample_rate; +}; + +#endif -- cgit v1.2.3 From 088d61d1fdfde56850c157138a6dc08880c1853d Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 14 Aug 2013 21:43:31 +0200 Subject: drm/i2c: tda998x: fix sync generation and calculation This fixes the wrong sync generation and sync calculation of TDA998x for HS/VS-based sync detection. 
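As a worked example (illustrative only, not part of the patch): plugging the
standard CEA 1280x720@60 timings (hdisplay 1280, hsync_start 1390, hsync_end
1430, htotal 1650, vdisplay 720, vsync_start 725, vsync_end 730, vtotal 750,
progressive) into the new formulas below gives

    hs_pix_s = hsync_start - hdisplay     = 1390 - 1280 = 110
    hs_pix_e = hsync_end - hdisplay       = 1430 - 1280 = 150
    de_pix_s = htotal - hdisplay          = 1650 - 1280 = 370
    de_pix_e = htotal                     = 1650
    ref_pix  = 3 + hs_pix_s               = 113   (RGB input, +3 offset)
    ref_line = 1 + vsync_start - vdisplay = 1 + 725 - 720 = 6

which places the reference point just after the rising HS/VS edge, matching
the comment added in the code below.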
Signed-off-by: Sebastian Hesselbarth Tested-by: Darren Etheridge Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 181 ++++++++++++++++++++++++-------------- 1 file changed, 115 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 2b64dfa60205..92fcb3deae22 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -140,8 +140,12 @@ struct tda998x_priv { #define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */ #define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */ #define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */ +#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */ +#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */ #define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */ #define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */ +#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */ +#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */ #define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */ #define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */ #define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */ @@ -152,21 +156,29 @@ struct tda998x_priv { #define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */ #define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */ #define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */ +#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */ +#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */ +#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */ +#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */ #define REG_DE_START_MSB REG(0x00, 0xc5) /* write */ #define REG_DE_START_LSB REG(0x00, 0xc6) /* write */ #define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */ #define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */ #define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */ +# define TBG_CNTRL_0_TOP_TGL (1 << 0) +# define TBG_CNTRL_0_TOP_SEL (1 << 1) +# define TBG_CNTRL_0_DE_EXT (1 << 2) +# define TBG_CNTRL_0_TOP_EXT (1 << 3) # define TBG_CNTRL_0_FRAME_DIS (1 << 5) # define TBG_CNTRL_0_SYNC_MTHD (1 << 6) # define TBG_CNTRL_0_SYNC_ONCE (1 << 7) #define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */ -# define TBG_CNTRL_1_VH_TGL_0 (1 << 0) -# define TBG_CNTRL_1_VH_TGL_1 (1 << 1) -# define TBG_CNTRL_1_VH_TGL_2 (1 << 2) -# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3) -# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4) -# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5) +# define TBG_CNTRL_1_H_TGL (1 << 0) +# define TBG_CNTRL_1_V_TGL (1 << 1) +# define TBG_CNTRL_1_TGL_EN (1 << 2) +# define TBG_CNTRL_1_X_EXT (1 << 3) +# define TBG_CNTRL_1_H_EXT (1 << 4) +# define TBG_CNTRL_1_V_EXT (1 << 5) # define TBG_CNTRL_1_DWIN_DIS (1 << 6) #define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */ #define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */ @@ -735,43 +747,70 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct tda998x_priv *priv = to_tda998x_priv(encoder); - uint16_t hs_start, hs_end, line_start, line_end; - uint16_t vwin_start, vwin_end, de_start, de_end; - uint16_t ref_pix, ref_line, pix_start2; + uint16_t ref_pix, ref_line, n_pix, n_line; + uint16_t hs_pix_s, hs_pix_e; + uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e; + uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e; + uint16_t vwin1_line_s, vwin1_line_e; + uint16_t vwin2_line_s, vwin2_line_e; + uint16_t de_pix_s, de_pix_e; uint8_t reg, div, rep; - hs_start = mode->hsync_start - mode->hdisplay; - hs_end = mode->hsync_end - 
mode->hdisplay; - line_start = 1; - line_end = 1 + mode->vsync_end - mode->vsync_start; - vwin_start = mode->vtotal - mode->vsync_start; - vwin_end = vwin_start + mode->vdisplay; - de_start = mode->htotal - mode->hdisplay; - de_end = mode->htotal; - - pix_start2 = 0; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - pix_start2 = (mode->htotal / 2) + hs_start; - - /* TODO how is this value calculated? It is 2 for all common - * formats in the tables in out of tree nxp driver (assuming - * I've properly deciphered their byzantine table system) + /* + * Internally TDA998x is using ITU-R BT.656 style sync but + * we get VESA style sync. TDA998x is using a reference pixel + * relative to ITU to sync to the input frame and for output + * sync generation. Currently, we are using reference detection + * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point + * which is position of rising VS with coincident rising HS. + * + * Now there is some issues to take care of: + * - HDMI data islands require sync-before-active + * - TDA998x register values must be > 0 to be enabled + * - REFLINE needs an additional offset of +1 + * - REFPIX needs an addtional offset of +1 for UYUV and +3 for RGB + * + * So we add +1 to all horizontal and vertical register values, + * plus an additional +3 for REFPIX as we are using RGB input only. */ - ref_line = 2; - - /* this might changes for other color formats from the CRTC: */ - ref_pix = 3 + hs_start; + n_pix = mode->htotal; + n_line = mode->vtotal; + + hs_pix_e = mode->hsync_end - mode->hdisplay; + hs_pix_s = mode->hsync_start - mode->hdisplay; + de_pix_e = mode->htotal; + de_pix_s = mode->htotal - mode->hdisplay; + ref_pix = 3 + hs_pix_s; + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) { + ref_line = 1 + mode->vsync_start - mode->vdisplay; + vwin1_line_s = mode->vtotal - mode->vdisplay - 1; + vwin1_line_e = vwin1_line_s + mode->vdisplay; + vs1_pix_s = vs1_pix_e = hs_pix_s; + vs1_line_s = mode->vsync_start - mode->vdisplay; + vs1_line_e = vs1_line_s + + mode->vsync_end - mode->vsync_start; + vwin2_line_s = vwin2_line_e = 0; + vs2_pix_s = vs2_pix_e = 0; + vs2_line_s = vs2_line_e = 0; + } else { + ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2; + vwin1_line_s = (mode->vtotal - mode->vdisplay)/2; + vwin1_line_e = vwin1_line_s + mode->vdisplay/2; + vs1_pix_s = vs1_pix_e = hs_pix_s; + vs1_line_s = (mode->vsync_start - mode->vdisplay)/2; + vs1_line_e = vs1_line_s + + (mode->vsync_end - mode->vsync_start)/2; + vwin2_line_s = vwin1_line_s + mode->vtotal/2; + vwin2_line_e = vwin2_line_s + mode->vdisplay/2; + vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2; + vs2_line_s = vs1_line_s + mode->vtotal/2 ; + vs2_line_e = vs2_line_s + + (mode->vsync_end - mode->vsync_start)/2; + } div = 148500 / mode->clock; - DBG("clock=%d, div=%u", mode->clock, div); - DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u", - hs_start, hs_end, line_start, line_end); - DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u", - vwin_start, vwin_end, de_start, de_end); - DBG("ref_line=%u, ref_pix=%u, pix_start2=%u", - ref_line, ref_pix, pix_start2); - /* mute the audio FIFO: */ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); @@ -802,9 +841,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | PLL_SERIAL_2_SRL_PR(rep)); - reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2); - reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2); - /* set color matrix bypass flag: */ reg_set(encoder, 
REG_MAT_CONTRL, MAT_CONTRL_MAT_BP); @@ -813,46 +849,59 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); + /* + * Sync on rising HSYNC/VSYNC + */ reg_write(encoder, REG_VIP_CNTRL_3, 0); reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS); + + /* + * TDA19988 requires high-active sync at input stage, + * so invert low-active sync provided by master encoder here + */ + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); if (mode->flags & DRM_MODE_FLAG_NVSYNC) reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL); + /* + * Always generate sync polarity relative to input sync and + * revert input stage toggled sync at output stage + */ + reg = TBG_CNTRL_1_TGL_EN; if (mode->flags & DRM_MODE_FLAG_NHSYNC) - reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); + reg |= TBG_CNTRL_1_H_TGL; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + reg |= TBG_CNTRL_1_V_TGL; + reg_write(encoder, REG_TBG_CNTRL_1, reg); reg_write(encoder, REG_VIDFORMAT, 0x00); - reg_write16(encoder, REG_NPIX_MSB, mode->htotal); - reg_write16(encoder, REG_NLINE_MSB, mode->vtotal); - reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start); - reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end); - reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start); - reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start); - reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start); - reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end); - reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start); - reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end); - reg_write16(encoder, REG_DE_START_MSB, de_start); - reg_write16(encoder, REG_DE_STOP_MSB, de_end); + reg_write16(encoder, REG_REFPIX_MSB, ref_pix); + reg_write16(encoder, REG_REFLINE_MSB, ref_line); + reg_write16(encoder, REG_NPIX_MSB, n_pix); + reg_write16(encoder, REG_NLINE_MSB, n_line); + reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s); + reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s); + reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e); + reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e); + reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s); + reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s); + reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e); + reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e); + reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s); + reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e); + reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s); + reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e); + reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s); + reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e); + reg_write16(encoder, REG_DE_START_MSB, de_pix_s); + reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e); if (priv->rev == TDA19988) { /* let incoming pixels fill the active space (if any) */ reg_write(encoder, REG_ENABLE_SPACE, 0x01); } - reg_write16(encoder, REG_REFPIX_MSB, ref_pix); - reg_write16(encoder, REG_REFLINE_MSB, ref_line); - - reg = TBG_CNTRL_1_DWIN_DIS | /* HDCP off */ - TBG_CNTRL_1_VH_TGL_2; - /* - * It is questionable whether this is correct - the nxp driver - * does not set VH_TGL_2 and the below for all display modes. 
- */ - if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC)) - reg |= TBG_CNTRL_1_VH_TGL_0; - reg_set(encoder, REG_TBG_CNTRL_1, reg); - /* must be last register set: */ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); -- cgit v1.2.3 From 179f1aa407b466c06a94f9e54abc948d1e1146e7 Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 14 Aug 2013 21:43:32 +0200 Subject: drm/i2c: tda998x: prepare for broken sync workaround Some LCD controller cannot provide valid VESA style sync, i.e. coincident HS/VS edges. First, this patch adds hskew passed from the adjusted_mode to reference pixel calculation to allow those controllers to add an offset relative to the expected reference pixel. Signed-off-by: Darren Etheridge Signed-off-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 92fcb3deae22..c2bd711e86e9 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -782,6 +782,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, de_pix_s = mode->htotal - mode->hdisplay; ref_pix = 3 + hs_pix_s; + /* + * Attached LCD controllers may generate broken sync. Allow + * those to adjust the position of the rising VS edge by adding + * HSKEW to ref_pix. + */ + if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW) + ref_pix += adjusted_mode->hskew; + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) { ref_line = 1 + mode->vsync_start - mode->vdisplay; vwin1_line_s = mode->vtotal - mode->vdisplay - 1; -- cgit v1.2.3 From a9767188678725aac99d7990025dd5b822728ba8 Mon Sep 17 00:00:00 2001 From: Darren Etheridge Date: Wed, 14 Aug 2013 21:43:33 +0200 Subject: drm/tilcdc fixup mode to workaround sync for tda998x Add a fixup function that will flip the hsync priority and add a hskew value that is used to shift the tda998x to the right by a variable number of pixels depending on the mode. This works around an issue with the sync timings that tilcdc is outputing. 
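Continuing the illustrative 1280x720@60 example from the previous patch (not
part of this change): the fixup below sets

    adjusted_mode->hskew  = hsync_end - hsync_start = 1430 - 1390 = 40;
    adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;  /* plus NHSYNC <-> PHSYNC flip */

so that the TDA998x mode_set then computes ref_pix = 113 + 40 = 153, shifting
its reference point right by the HSYNC pulse width to line up with the VS
edge that tilcdc actually generates.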
Signed-off-by: Darren Etheridge Tested-by: Sebastian Hesselbarth Tested-by: Russell King Signed-off-by: Dave Airlie --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 7 ++++++- drivers/gpu/drm/tilcdc/tilcdc_slave.c | 27 ++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 7418dcd986d3..6d0524095fe3 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -379,7 +379,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); - if (mode->flags & DRM_MODE_FLAG_NHSYNC) + /* + * use value from adjusted_mode here as this might have been + * changed as part of the fixup for slave encoders to solve the + * issue where tilcdc timings are not VESA compliant + */ + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c index dfffaf014022..23b3203d8241 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c @@ -73,13 +73,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder) tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info); } +static bool slave_encoder_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* + * tilcdc does not generate VESA-complient sync but aligns + * VS on the second edge of HS instead of first edge. + * We use adjusted_mode, to fixup sync by aligning both rising + * edges and add HSKEW offset to let the slave encoder fix it up. + */ + adjusted_mode->hskew = mode->hsync_end - mode->hsync_start; + adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) { + adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC; + adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC; + } else { + adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC; + adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC; + } + + return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode); +} + + static const struct drm_encoder_funcs slave_encoder_funcs = { .destroy = slave_encoder_destroy, }; static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = { .dpms = drm_i2c_encoder_dpms, - .mode_fixup = drm_i2c_encoder_mode_fixup, + .mode_fixup = slave_encoder_fixup, .prepare = slave_encoder_prepare, .commit = drm_i2c_encoder_commit, .mode_set = drm_i2c_encoder_mode_set, -- cgit v1.2.3 From 7e0e6cbd0361dfeb2507fe0f3a8dd16e78ffeeb9 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 14 Aug 2013 15:07:14 +0200 Subject: drm/ast: remove unused driver_private access gem_bo->driver_private is never read by ast nor DRM core. No need to set it. Besides, drm core clears it during setup, anyway. 
Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 98d670825a1a..cf1c833f73ca 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -321,7 +321,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align, return ret; } - astbo->gem.driver_private = NULL; astbo->bo.bdev = &ast->ttm.bdev; ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); -- cgit v1.2.3 From 7d2e968e2bea38b38f24e65892411a9095711f77 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 14 Aug 2013 15:07:15 +0200 Subject: drm/mgag200: remove unused driver_private access gem_bo->driver_private is never read by mgag200 nor DRM core. No need to set it. Besides, drm core clears it during setup, anyway. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/mgag200/mgag200_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 3acb2b044c7b..6cf3fa0b35cc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -321,7 +321,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, return ret; } - mgabo->gem.driver_private = NULL; mgabo->bo.bdev = &mdev->ttm.bdev; mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); -- cgit v1.2.3 From 23a9a2e075fa4a8a46575977ed2cc531424a6d2c Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 14 Aug 2013 15:07:16 +0200 Subject: drm/cirrus: remove unused driver_private access gem_bo->driver_private is never read by cirrus nor DRM core. No need to set it. Besides, drm core clears it during setup, anyway. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/cirrus/cirrus_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 0047012045c2..bf8a50669489 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -326,7 +326,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, return ret; } - cirrusbo->gem.driver_private = NULL; cirrusbo->bo.bdev = &cirrus->ttm.bdev; cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); -- cgit v1.2.3 From f547b22aaf4f429089b3d5a987b94a2bd8c92eb1 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 14 Aug 2013 15:07:17 +0200 Subject: drm/qxl: remove unused object_pin/unpin() helpers These two helpers are unused. Remove them. They rely on gem_obj->driver_private, which is set to NULL during setup. As this field isn't used by the driver, anymore, we can remove this assignment as well. 
Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/qxl/qxl_drv.h | 3 --- drivers/gpu/drm/qxl/qxl_gem.c | 26 -------------------------- drivers/gpu/drm/qxl/qxl_object.c | 1 - 3 files changed, 30 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 4708621fe720..f7c9adde46a0 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -405,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, bool discardable, bool kernel, struct qxl_surface *surf, struct drm_gem_object **obj); -int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, - uint64_t *gpu_addr); -void qxl_gem_object_unpin(struct drm_gem_object *obj); int qxl_gem_object_create_with_handle(struct qxl_device *qdev, struct drm_file *file_priv, u32 domain, diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index 25e1777fb0a2..1648e4125af7 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, return 0; } -int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, - uint64_t *gpu_addr) -{ - struct qxl_bo *qobj = obj->driver_private; - int r; - - r = qxl_bo_reserve(qobj, false); - if (unlikely(r != 0)) - return r; - r = qxl_bo_pin(qobj, pin_domain, gpu_addr); - qxl_bo_unreserve(qobj); - return r; -} - -void qxl_gem_object_unpin(struct drm_gem_object *obj) -{ - struct qxl_bo *qobj = obj->driver_private; - int r; - - r = qxl_bo_reserve(qobj, false); - if (likely(r == 0)) { - qxl_bo_unpin(qobj); - qxl_bo_unreserve(qobj); - } -} - int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { return 0; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index aa161cddd87e..8691c76c5ef0 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -98,7 +98,6 @@ int qxl_bo_create(struct qxl_device *qdev, kfree(bo); return r; } - bo->gem_base.driver_private = NULL; bo->type = domain; bo->pin_count = pinned ? 1 : 0; bo->surface_id = 0; -- cgit v1.2.3 From e552df37ad589f75af969d536ee9c70c94cacccf Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 14 Aug 2013 15:07:18 +0200 Subject: drm/radeon: remove stale gem->driver_private access This field is never read. No need to set it in radeon. Besides, DRM gem core clears it during setup, anyway. 
Signed-off-by: David Herrmann Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 1 - drivers/gpu/drm/radeon/radeon_prime.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 2020bf4a3830..c0fa4aa9ceea 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev, return r; } bo->rdev = rdev; - bo->gem_base.driver_private = NULL; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 65b9eabd5a2f..20074560fc25 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, RADEON_GEM_DOMAIN_GTT, sg, &bo); if (ret) return ERR_PTR(ret); - bo->gem_base.driver_private = bo; mutex_lock(&rdev->gem.mutex); list_add_tail(&bo->list, &rdev->gem.objects); -- cgit v1.2.3 From 5c692948d8c9026f9477ff416465a81a3d9e06a8 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 15 Aug 2013 16:03:17 +0200 Subject: drm/ttm: kill unused functions Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 154 ---------------------------------------- 1 file changed, 154 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 8c0e2c020215..1006c15445e9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -290,157 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) return 0; } EXPORT_SYMBOL(ttm_fbdev_mmap); - - -ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, - const char __user *wbuf, char __user *rbuf, size_t count, - loff_t *f_pos, bool write) -{ - struct ttm_buffer_object *bo; - struct ttm_bo_driver *driver; - struct ttm_bo_kmap_obj map; - unsigned long dev_offset = (*f_pos >> PAGE_SHIFT); - unsigned long kmap_offset; - unsigned long kmap_end; - unsigned long kmap_num; - size_t io_size; - unsigned int page_offset; - char *virtual; - int ret; - bool no_wait = false; - bool dummy; - - bo = ttm_bo_vm_lookup(bdev, dev_offset, 1); - if (unlikely(bo == NULL)) - return -EFAULT; - - driver = bo->bdev->driver; - if (unlikely(!driver->verify_access)) { - ret = -EPERM; - goto out_unref; - } - - ret = driver->verify_access(bo, filp); - if (unlikely(ret != 0)) - goto out_unref; - - kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node); - if (unlikely(kmap_offset >= bo->num_pages)) { - ret = -EFBIG; - goto out_unref; - } - - page_offset = *f_pos & ~PAGE_MASK; - io_size = bo->num_pages - kmap_offset; - io_size = (io_size << PAGE_SHIFT) - page_offset; - if (count < io_size) - io_size = count; - - kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; - kmap_num = kmap_end - kmap_offset + 1; - - ret = ttm_bo_reserve(bo, true, no_wait, false, 0); - - switch (ret) { - case 0: - break; - case -EBUSY: - ret = -EAGAIN; - goto out_unref; - default: - goto out_unref; - } - - ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) { - ttm_bo_unreserve(bo); - goto out_unref; - } - - virtual = ttm_kmap_obj_virtual(&map, &dummy); - virtual += page_offset; - - if (write) - ret = copy_from_user(virtual, wbuf, io_size); - else - ret = copy_to_user(rbuf, 
virtual, io_size); - - ttm_bo_kunmap(&map); - ttm_bo_unreserve(bo); - ttm_bo_unref(&bo); - - if (unlikely(ret != 0)) - return -EFBIG; - - *f_pos += io_size; - - return io_size; -out_unref: - ttm_bo_unref(&bo); - return ret; -} - -ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, - char __user *rbuf, size_t count, loff_t *f_pos, - bool write) -{ - struct ttm_bo_kmap_obj map; - unsigned long kmap_offset; - unsigned long kmap_end; - unsigned long kmap_num; - size_t io_size; - unsigned int page_offset; - char *virtual; - int ret; - bool no_wait = false; - bool dummy; - - kmap_offset = (*f_pos >> PAGE_SHIFT); - if (unlikely(kmap_offset >= bo->num_pages)) - return -EFBIG; - - page_offset = *f_pos & ~PAGE_MASK; - io_size = bo->num_pages - kmap_offset; - io_size = (io_size << PAGE_SHIFT) - page_offset; - if (count < io_size) - io_size = count; - - kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; - kmap_num = kmap_end - kmap_offset + 1; - - ret = ttm_bo_reserve(bo, true, no_wait, false, 0); - - switch (ret) { - case 0: - break; - case -EBUSY: - return -EAGAIN; - default: - return ret; - } - - ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) { - ttm_bo_unreserve(bo); - return ret; - } - - virtual = ttm_kmap_obj_virtual(&map, &dummy); - virtual += page_offset; - - if (write) - ret = copy_from_user(virtual, wbuf, io_size); - else - ret = copy_to_user(rbuf, virtual, io_size); - - ttm_bo_kunmap(&map); - ttm_bo_unreserve(bo); - ttm_bo_unref(&bo); - - if (unlikely(ret != 0)) - return ret; - - *f_pos += io_size; - - return io_size; -} -- cgit v1.2.3 From 161695bf648da484e048b52da7bee72db4aa13fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:11 +0200 Subject: drm/omap: kill firstopen callback KMS drivers really shouldn't need to do anything on firstopen, so kill empty callbacks. Signed-off-by: Daniel Vetter Acked-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/omapdrm/omap_drv.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2f9e22e22bd4..47e64f916254 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file) return 0; } -static int dev_firstopen(struct drm_device *dev) -{ - DBG("firstopen: dev=%p", dev); - return 0; -} - /** * lastclose - clean up after all DRM clients have exited * @dev: DRM device @@ -609,7 +603,6 @@ static struct drm_driver omap_drm_driver = { .load = dev_load, .unload = dev_unload, .open = dev_open, - .firstopen = dev_firstopen, .lastclose = dev_lastclose, .preclose = dev_preclose, .postclose = dev_postclose, -- cgit v1.2.3 From d8ed16884afc536da6ad480a135d2a54940224d6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:12 +0200 Subject: drm/radeon: kill firstopen callback for kms driver Again, it does nothing. 
Signed-off-by: Daniel Vetter Reviewed-by: Eric Anholt Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/radeon/radeon_kms.c | 13 ------------- 2 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index fa7a7e13da6c..3585f22c5c15 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -81,7 +81,6 @@ #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); -int radeon_driver_firstopen_kms(struct drm_device *dev); void radeon_driver_lastclose_kms(struct drm_device *dev); int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void radeon_driver_postclose_kms(struct drm_device *dev, @@ -390,7 +389,6 @@ static struct drm_driver kms_driver = { DRIVER_PRIME, .dev_priv_size = 0, .load = radeon_driver_load_kms, - .firstopen = radeon_driver_firstopen_kms, .open = radeon_driver_open_kms, .preclose = radeon_driver_preclose_kms, .postclose = radeon_driver_postclose_kms, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 866c2b70aa6f..b46a5616664a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -448,19 +448,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) /* * Outdated mess for old drm with Xorg being in charge (void function now). */ -/** - * radeon_driver_firstopen_kms - drm callback for first open - * - * @dev: drm dev pointer - * - * Nothing to be done for KMS (all asics). - * Returns 0 on success. - */ -int radeon_driver_firstopen_kms(struct drm_device *dev) -{ - return 0; -} - /** * radeon_driver_firstopen_kms - drm callback for last close * -- cgit v1.2.3 From 45886af246d926304d5e990da63d55d9db3216c0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:16 +0200 Subject: drm: kill dev->driver->set_version Totally unused, so just rip it out. Anyway, we want drivers to be fully backwards compatible, allowing them to change behaviour is just a recipe for them to break badly. 
Reviewed-by: Eric Anholt Reviewed-by: Rob Clark Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_ioctl.c | 3 --- include/drm/drmP.h | 2 -- 2 files changed, 5 deletions(-) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ffd7a7ba70d4..0acf0807d1ad 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -352,9 +352,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri retcode = -EINVAL; goto done; } - - if (dev->driver->set_version) - dev->driver->set_version(dev, sv); } done: diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 3ecdde6274be..813ca1d11728 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -883,8 +883,6 @@ struct drm_driver { void (*irq_preinstall) (struct drm_device *dev); int (*irq_postinstall) (struct drm_device *dev); void (*irq_uninstall) (struct drm_device *dev); - void (*set_version) (struct drm_device *dev, - struct drm_set_version *sv); /* Master routines */ int (*master_create)(struct drm_device *dev, struct drm_master *master); -- cgit v1.2.3 From 3d914e8357256e7e92d1b7dd2dda9cf94e39c4e8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:17 +0200 Subject: drm: hide legacy sg cleanup better from common code I've decided that some clear markers for what's legacy dri1/non-gem code is useful. I've opted to use the drm_legacy prefix and then hide all the checks in that function for better readability in the common code. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 6 +----- drivers/gpu/drm/drm_scatter.c | 10 +++++++++- include/drm/drmP.h | 2 +- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index dddd79988ffc..743b24d7918e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -197,11 +197,7 @@ int drm_lastclose(struct drm_device * dev) drm_agp_clear(dev); - if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && - !drm_core_check_feature(dev, DRIVER_MODESET)) { - drm_sg_cleanup(dev->sg); - dev->sg = NULL; - } + drm_legacy_sg_cleanup(dev); /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index a4a076ff1757..dd8a6480065c 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size) #endif } -void drm_sg_cleanup(struct drm_sg_mem * entry) +static void drm_sg_cleanup(struct drm_sg_mem * entry) { struct page *page; int i; @@ -64,6 +64,14 @@ void drm_sg_cleanup(struct drm_sg_mem * entry) kfree(entry); } +void drm_legacy_sg_cleanup(struct drm_device *dev) +{ + if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && + !drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_sg_cleanup(dev->sg); + dev->sg = NULL; + } +} #ifdef _LP64 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) #else diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 813ca1d11728..10d9f83a9df0 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1522,7 +1522,7 @@ extern int drm_vma_info(struct seq_file *m, void *data); #endif /* Scatter Gather Support (drm_scatter.h) */ -extern void drm_sg_cleanup(struct drm_sg_mem * entry); +extern void drm_legacy_sg_cleanup(struct drm_device *dev); extern int drm_sg_alloc(struct drm_device *dev, void *data, 
struct drm_file *file_priv); extern int drm_sg_free(struct drm_device *dev, void *data, -- cgit v1.2.3 From 8e194bbf96d81c651c52b82e125be1264db0cd4e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:18 +0200 Subject: drm: disallow legacy sg ioctls for modesetting drivers Only the radeon/r128/ati ums drivers use this. Furthermore the cleanup was already only done for UMS drivers. Also a quick check of the ATI ddx git history shows that only the UMS code ever used this facility. So we can safely disallow these pair of ioctls for modesetting drivers. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_scatter.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index dd8a6480065c..1c78406f6e71 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -87,6 +87,9 @@ int drm_sg_alloc(struct drm_device *dev, void *data, DRM_DEBUG("\n"); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; @@ -197,6 +200,9 @@ int drm_sg_free(struct drm_device *dev, void *data, struct drm_scatter_gather *request = data; struct drm_sg_mem *entry; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; -- cgit v1.2.3 From e2e99a8206bcce6f2d3d72ff8be42859f98dbcda Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:19 +0200 Subject: drm: mark dma setup/teardown as legacy systems And hide the checks a bit better. This was already disallowed for modesetting drivers, so no functinal change here. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_dma.c | 17 +++++++++++++++-- drivers/gpu/drm/drm_drv.c | 4 +--- drivers/gpu/drm/drm_fops.c | 12 +++--------- include/drm/drmP.h | 4 ++-- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index 495b5fd2787c..8a140a953754 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -44,10 +44,18 @@ * * Allocate and initialize a drm_device_dma structure. */ -int drm_dma_setup(struct drm_device *dev) +int drm_legacy_dma_setup(struct drm_device *dev) { int i; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || + drm_core_check_feature(dev, DRIVER_MODESET)) { + return 0; + } + + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); if (!dev->dma) return -ENOMEM; @@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev) * Free all pages associated with DMA buffers, the buffers and pages lists, and * finally the drm_device::dma structure itself. 
*/ -void drm_dma_takedown(struct drm_device *dev) +void drm_legacy_dma_takedown(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; int i, j; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || + drm_core_check_feature(dev, DRIVER_MODESET)) { + return; + } + if (!dma) return; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 743b24d7918e..5b949a736712 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -205,9 +205,7 @@ int drm_lastclose(struct drm_device * dev) kfree(vma); } - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && - !drm_core_check_feature(dev, DRIVER_MODESET)) - drm_dma_takedown(dev); + drm_legacy_dma_takedown(dev); dev->dev_mapping = NULL; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 72acae908a7d..f343234bd831 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -60,15 +60,9 @@ static int drm_setup(struct drm_device * dev) atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && - !drm_core_check_feature(dev, DRIVER_MODESET)) { - dev->buf_use = 0; - atomic_set(&dev->buf_alloc, 0); - - i = drm_dma_setup(dev); - if (i < 0) - return i; - } + i = drm_legacy_dma_setup(dev); + if (i < 0) + return i; for (i = 0; i < ARRAY_SIZE(dev->counts); i++) atomic_set(&dev->counts[i], 0); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 10d9f83a9df0..1da25304c289 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1374,8 +1374,8 @@ extern int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); /* DMA support (drm_dma.h) */ -extern int drm_dma_setup(struct drm_device *dev); -extern void drm_dma_takedown(struct drm_device *dev); +extern int drm_legacy_dma_setup(struct drm_device *dev); +extern void drm_legacy_dma_takedown(struct drm_device *dev); extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); extern void drm_core_reclaim_buffers(struct drm_device *dev, struct drm_file *filp); -- cgit v1.2.3 From 8d38c4b4371b8f9d1d72737c880cdae14b024142 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:20 +0200 Subject: drm: disallow legacy dma ioctls for modesetting drivers Now only legacy ums drivers have the DRIVER_HAVE_DMA driver feature flag set, so strictly speaking the modesetting check is redundant. But adding it has the upside that it makes it very clear that the dma support is legacy stuff. 
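Illustrative userspace view (not from this patch, hypothetical snippet): on a
modesetting driver a legacy buffer ioctl now simply fails, e.g.

    struct drm_buf_desc desc = { .count = 1, .size = 4096 };
    int ret = ioctl(fd, DRM_IOCTL_ADDBUFS, &desc);
    /* ret == -1, errno == EINVAL on any DRIVER_MODESET driver */

which is the same hard error drivers without DRIVER_HAVE_DMA already returned.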
Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index bef4abff8fa3..cccc25f7ef86 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1305,6 +1305,9 @@ int drm_addbufs(struct drm_device *dev, void *data, struct drm_buf_desc *request = data; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1348,6 +1351,9 @@ int drm_infobufs(struct drm_device *dev, void *data, int i; int count; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1427,6 +1433,9 @@ int drm_markbufs(struct drm_device *dev, void *data, int order; struct drm_buf_entry *entry; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1472,6 +1481,9 @@ int drm_freebufs(struct drm_device *dev, void *data, int idx; struct drm_buf *buf; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1524,6 +1536,9 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_buf_map *request = data; int i; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; -- cgit v1.2.3 From 7c510133d93dd6f15ca040733ba7b2891ed61fd1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:21 +0200 Subject: drm: mark context support as a legacy subsystem So after a lot of digging around in git histories it looks like this has only ever be used by dri1 render clients. Hence we can fully disable the entire thing for modesetting drivers and so greatly reduce the attack surface for potential exploits (or at least tools like trinity ...). Also add the drm_legacy prefix for functions which are called from common code. To further reduce the impact on common code also extract all the ctx release handling into a function (instead of only releasing individual handles) and make ctxbitmap_cleanup return void - it can never fail. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_context.c | 73 ++++++++++++++++++++++++++++++++++++++----- drivers/gpu/drm/drm_fops.c | 21 +------------ drivers/gpu/drm/drm_stub.c | 10 ++---- include/drm/drmP.h | 7 +++-- 4 files changed, 72 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 224ff965bcf7..b4fb86d89850 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -42,10 +42,6 @@ #include -/******************************************************************/ -/** \name Context bitmap support */ -/*@{*/ - /** * Free a handle from the context bitmap. * @@ -56,13 +52,48 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. 
*/ -void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) +static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); } +/******************************************************************/ +/** \name Context bitmap support */ +/*@{*/ + +void drm_legacy_ctxbitmap_release(struct drm_device *dev, + struct drm_file *file_priv) +{ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->ctxlist_mutex); + if (!list_empty(&dev->ctxlist)) { + struct drm_ctx_list *pos, *n; + + list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { + if (pos->tag == file_priv && + pos->handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_dtor) + dev->driver->context_dtor(dev, + pos->handle); + + drm_ctxbitmap_free(dev, pos->handle); + + list_del(&pos->head); + kfree(pos); + --dev->ctx_count; + } + } + } + mutex_unlock(&dev->ctxlist_mutex); +} + /** * Context bitmap allocation. * @@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev) * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(struct drm_device * dev) +void drm_legacy_ctxbitmap_init(struct drm_device * dev) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + idr_init(&dev->ctx_idr); - return 0; } /** @@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. */ -void drm_ctxbitmap_cleanup(struct drm_device * dev) +void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) { mutex_lock(&dev->struct_mutex); idr_destroy(&dev->ctx_idr); @@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data, struct drm_local_map *map; struct drm_map_list *_entry; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->struct_mutex); map = idr_find(&dev->ctx_idr, request->ctx_id); @@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data, struct drm_local_map *map = NULL; struct drm_map_list *r_list = NULL; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map @@ -280,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data, struct drm_ctx ctx; int i; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { @@ -310,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data, struct drm_ctx_list *ctx_entry; struct drm_ctx *ctx = data; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + ctx->handle = drm_ctxbitmap_next(dev); if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. 
*/ @@ -353,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx *ctx = data; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + /* This is 0, because we don't handle any context flags */ ctx->flags = 0; @@ -375,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); return drm_context_switch(dev, dev->last_context, ctx->handle); } @@ -395,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); drm_context_switch_complete(dev, file_priv, ctx->handle); @@ -417,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index f343234bd831..10334999f229 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -474,26 +474,7 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { - struct drm_ctx_list *pos, *n; - - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->tag == file_priv && - pos->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, - pos->handle); - - drm_ctxbitmap_free(dev, pos->handle); - - list_del(&pos->head); - kfree(pos); - --dev->ctx_count; - } - } - } - mutex_unlock(&dev->ctxlist_mutex); + drm_legacy_ctxbitmap_release(dev, file_priv); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index d663f7d66dab..aa0664d91060 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -288,13 +288,7 @@ int drm_fill_in_dev(struct drm_device *dev, goto error_out_unreg; } - - - retcode = drm_ctxbitmap_init(dev); - if (retcode) { - DRM_ERROR("Cannot allocate memory for context bitmap.\n"); - goto error_out_unreg; - } + drm_legacy_ctxbitmap_init(dev); if (driver->driver_features & DRIVER_GEM) { retcode = drm_gem_init(dev); @@ -463,7 +457,7 @@ void drm_put_dev(struct drm_device *dev) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); - drm_ctxbitmap_cleanup(dev); + drm_legacy_ctxbitmap_cleanup(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1da25304c289..277f307e053d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1313,9 +1313,10 @@ extern int drm_newctx(struct drm_device *dev, void *data, extern int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_ctxbitmap_init(struct drm_device *dev); -extern void drm_ctxbitmap_cleanup(struct drm_device *dev); -extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); +extern void drm_legacy_ctxbitmap_init(struct drm_device *dev); +extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); +extern void drm_legacy_ctxbitmap_release(struct drm_device *dev, + struct drm_file *file_priv); extern int drm_setsareactx(struct drm_device *dev, void *data, struct 
drm_file *file_priv); -- cgit v1.2.3 From 5bbd533248f653fcfd8de0f1202e2c67d8f884a4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:22 +0200 Subject: drm/vmwgfx: remove redundant clearing of driver->dma_quiescent It's kzalloced ... Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 50861504b5d9..81ef6bac8737 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1143,7 +1143,6 @@ static struct drm_driver driver = { .disable_vblank = vmw_disable_vblank, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), - .dma_quiescent = NULL, /*vmw_dma_quiescent, */ .master_create = vmw_master_create, .master_destroy = vmw_master_destroy, .master_set = vmw_master_set, -- cgit v1.2.3 From b0e898ac555e96e7863a5ee95d70f3625f1db5e2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:23 +0200 Subject: drm: remove FASYNC support So I've stumbled over drm_fasync and wondered what it does. Digging that up is quite a story. First I've had to read up on what this does and ended up being rather bewildered why peopled loved signals so much back in the days that they've created SIGIO just for that ... Then I wondered how this ever works, and what that strange "No-op." comment right above it should mean. After all calling the core fasync helper is pretty obviously not a noop. After reading through the kernels FASYNC implementation I've noticed that signals are only sent out to the processes attached with FASYNC by calling kill_fasync. No merged drm driver has ever done that. After more digging I've found out that the only driver that ever used this is the so called GAMMA driver. I've frankly never heard of such a gpu brand ever before. Now FASYNC seems to not have been the only bad thing with that driver, since Dave Airlie removed it from the drm driver with prejudice: commit 1430163b4bbf7b00367ea1066c1c5fe85dbeefed Author: Dave Airlie Date: Sun Aug 29 12:04:35 2004 +0000 Drop GAMMA DRM from a great height ... Long story short, the drm fasync support seems to be doing absolutely nothing. And the only user of it was never merged into the upstream kernel. And we don't need any fops->fasync callback since the fcntl implementation in the kernel already implements the noop case correctly. So stop this particular cargo-cult and rip it all out. v2: Kill drm_fasync assignments in rcar (newly added) and imx drivers (somehow I've missed that one in staging). Also drop the reference in the drm DocBook. ARM compile-fail reported by Rob Clark. v3: Move the removal of dev->buf_asnyc assignment in drm_setup to this patch here. v4: Actually git add ... tsk. 
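For reference, a minimal sketch (hypothetical foo_* names, never done by any
merged DRM driver) of what a driver would have needed for FASYNC to actually
deliver SIGIO:

    #include <linux/fs.h>

    static struct fasync_struct *foo_async;  /* per-device in a real driver */

    static int foo_fasync(int fd, struct file *filp, int on)
    {
        /* attach/detach this file to the async notification list */
        return fasync_helper(fd, filp, on, &foo_async);
    }

    static void foo_event_ready(void)
    {
        /* the half that never existed anywhere in DRM */
        kill_fasync(&foo_async, SIGIO, POLL_IN);
    }

Without a kill_fasync() call anywhere, wiring up fops->fasync was pure dead
weight, which is why it can all go.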
Cc: Dave Airlie Cc: Laurent Pinchart Cc: Rob Clark Acked-by: Laurent Pinchart Signed-off-by: Daniel Vetter Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 1 - drivers/gpu/drm/ast/ast_drv.c | 1 - drivers/gpu/drm/cirrus/cirrus_drv.c | 1 - drivers/gpu/drm/drm_fops.c | 14 -------------- drivers/gpu/drm/gma500/psb_drv.c | 1 - drivers/gpu/drm/i810/i810_dma.c | 1 - drivers/gpu/drm/i810/i810_drv.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 1 - drivers/gpu/drm/mga/mga_drv.c | 1 - drivers/gpu/drm/mgag200/mgag200_drv.c | 1 - drivers/gpu/drm/nouveau/nouveau_drm.c | 1 - drivers/gpu/drm/omapdrm/omap_drv.c | 1 - drivers/gpu/drm/qxl/qxl_drv.c | 1 - drivers/gpu/drm/r128/r128_drv.c | 1 - drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 1 - drivers/gpu/drm/savage/savage_drv.c | 1 - drivers/gpu/drm/shmobile/shmob_drm_drv.c | 1 - drivers/gpu/drm/sis/sis_drv.c | 1 - drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - drivers/gpu/drm/tilcdc/tilcdc_drv.c | 1 - drivers/gpu/drm/udl/udl_drv.c | 1 - drivers/gpu/drm/via/via_drv.c | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - drivers/gpu/host1x/drm/drm.c | 1 - drivers/staging/imx-drm/imx-drm-core.c | 1 - include/drm/drmP.h | 3 --- 27 files changed, 43 deletions(-) diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 87e22ecd9281..9494ab8af9e7 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -2504,7 +2504,6 @@ void (*postclose) (struct drm_device *, struct drm_file *); .poll = drm_poll, .read = drm_read, - .fasync = drm_fasync, .llseek = no_llseek, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index a144fb044852..60f1ce3998c3 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -190,7 +190,6 @@ static const struct file_operations ast_fops = { .unlocked_ioctl = drm_ioctl, .mmap = ast_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index d35d99c15f84..dd9c908ab3fc 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -85,7 +85,6 @@ static const struct file_operations cirrus_driver_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif - .fasync = drm_fasync, }; static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR, diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 10334999f229..1817f03efe80 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -73,8 +73,6 @@ static int drm_setup(struct drm_device * dev) dev->last_context = 0; dev->if_version = 0; - dev->buf_async = NULL; - DRM_DEBUG("\n"); /* @@ -372,18 +370,6 @@ out_put_pid: return ret; } -/** No-op. 
*/ -int drm_fasync(int fd, struct file *filp, int on) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - - DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, - (long)old_encode_dev(priv->minor->device)); - return fasync_helper(fd, filp, on, &dev->buf_async); -} -EXPORT_SYMBOL(drm_fasync); - static void drm_master_release(struct drm_device *dev, struct file *filp) { struct drm_file *file_priv = filp->private_data; diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index d13c2fc848bc..99b5293972c6 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -622,7 +622,6 @@ static const struct file_operations psb_gem_fops = { .unlocked_ioctl = psb_unlocked_ioctl, .mmap = drm_gem_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, }; diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index eac755bb8f9b..ab1892eb1074 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = { .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = i810_mmap_buffers, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 2e91fc3580b4..d85c05b4877d 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 13457e3e9cad..9411a745adaf 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -994,7 +994,6 @@ static const struct file_operations i915_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_gem_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 17d0a637e4fb..fe71e1e44e48 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = mga_compat_ioctl, #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index bd9196478735..b570127ae3b2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = mgag200_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1faa75f42393..b29d04b822ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -673,7 +673,6 @@ nouveau_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = nouveau_ttm_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #if defined(CONFIG_COMPAT) .compat_ioctl = nouveau_compat_ioctl, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 
47e64f916254..2603d909f49c 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -592,7 +592,6 @@ static const struct file_operations omapdriver_fops = { .release = drm_release, .mmap = omap_gem_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, .llseek = noop_llseek, }; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 48f2dfdeabcb..514118ae72d4 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = { .release = drm_release, .unlocked_ioctl = drm_ioctl, .poll = drm_poll, - .fasync = drm_fasync, .mmap = qxl_mmap, }; diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 472c38fe123f..c2338cbc56ad 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = r128_compat_ioctl, #endif diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 3585f22c5c15..3e52331124de 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -266,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_compat_ioctl, @@ -375,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = { .unlocked_ioctl = drm_ioctl, .mmap = radeon_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_kms_compat_ioctl, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 0a9f1bb88337..0023f9719cf1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -148,7 +148,6 @@ static const struct file_operations rcar_du_fops = { #endif .poll = drm_poll, .read = drm_read, - .fasync = drm_fasync, .llseek = no_llseek, .mmap = drm_gem_cma_mmap, }; diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 71b2081e7835..9135c8bd6fbc 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 7f2ea1a5a45f..015551866b4a 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = { #endif .poll = drm_poll, .read = drm_read, - .fasync = drm_fasync, .llseek = no_llseek, .mmap = drm_gem_cma_mmap, }; diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 5a5325e6b759..b88b2d302105 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git 
a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index ddfa743459d0..951ec13e4e5c 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 14801c2235ae..116da199b942 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -497,7 +497,6 @@ static const struct file_operations fops = { #endif .poll = drm_poll, .read = drm_read, - .fasync = drm_fasync, .llseek = no_llseek, .mmap = drm_gem_cma_mmap, }; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index bb0af58c769a..7650dc0d78ce 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = { .read = drm_read, .unlocked_ioctl = drm_ioctl, .release = drm_release, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index f4ae20327941..448799968a06 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 81ef6bac8737..2dd1919485e4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1120,7 +1120,6 @@ static const struct file_operations vmwgfx_driver_fops = { .mmap = vmw_mmap, .poll = vmw_fops_poll, .read = vmw_fops_read, - .fasync = drm_fasync, #if defined(CONFIG_COMPAT) .compat_ioctl = drm_compat_ioctl, #endif diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c index b128b90a94f6..15684bf073fb 100644 --- a/drivers/gpu/host1x/drm/drm.c +++ b/drivers/gpu/host1x/drm/drm.c @@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = { .unlocked_ioctl = drm_ioctl, .mmap = tegra_drm_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index a8900496b980..29607c25b261 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -207,7 +207,6 @@ static const struct file_operations imx_drm_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_gem_cma_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, .llseek = noop_llseek, }; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 277f307e053d..cef9a507246e 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1166,8 +1166,6 @@ struct drm_device { /*@} */ - struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ - struct drm_agp_head *agp; /**< AGP data */ struct device *dev; /**< Device structure */ @@ -1264,7 +1262,6 @@ extern int drm_lastclose(struct drm_device *dev); extern struct mutex drm_global_mutex; extern int drm_open(struct inode *inode, struct file *filp); extern int drm_stub_open(struct inode *inode, struct file *filp); -extern int 
drm_fasync(int fd, struct file *filp, int on); extern ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); extern int drm_release(struct inode *inode, struct file *filp); -- cgit v1.2.3 From 687fbb2e4f46ad00cbfef3f0da2425af7e8684a1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:24 +0200 Subject: drm: rip out DRIVER_FB_DMA and related code No driver ever sets that flag, so good riddance! Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 161 +-------------------------------------------- include/drm/drmP.h | 1 - 2 files changed, 2 insertions(+), 160 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index cccc25f7ef86..5f73f0af6125 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1130,161 +1130,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request return 0; } -static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) - return -EINVAL; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lu\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - spin_lock(&dev->count_lock); - if (dev->buf_use) { - spin_unlock(&dev->count_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->count_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kzalloc(count * sizeof(*entry->buflist), - GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper 
amount. */ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_FB; - - atomic_dec(&dev->buf_alloc); - return 0; -} - - /** * Add buffers for DMA transfers (ioctl). * @@ -1319,7 +1164,7 @@ int drm_addbufs(struct drm_device *dev, void *data, if (request->flags & _DRM_SG_BUFFER) ret = drm_addbufs_sg(dev, request); else if (request->flags & _DRM_FB_BUFFER) - ret = drm_addbufs_fb(dev, request); + ret = -EINVAL; else ret = drm_addbufs_pci(dev, request); @@ -1556,9 +1401,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, if (request->count >= dma->buf_count) { if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) - && (dma->flags & _DRM_DMA_USE_SG)) - || (drm_core_check_feature(dev, DRIVER_FB_DMA) - && (dma->flags & _DRM_DMA_USE_FB))) { + && (dma->flags & _DRM_DMA_USE_SG))) { struct drm_local_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index cef9a507246e..922e426b3391 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -147,7 +147,6 @@ int drm_err(const char *func, const char *format, ...); #define DRIVER_IRQ_SHARED 0x80 #define DRIVER_IRQ_VBL 0x100 #define DRIVER_DMA_QUEUE 0x200 -#define DRIVER_FB_DMA 0x400 #define DRIVER_IRQ_VBL2 0x800 #define DRIVER_GEM 0x1000 #define DRIVER_MODESET 0x2000 -- cgit v1.2.3 From 74867e3d53e41afe2a093196850167542fa508d2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:25 +0200 Subject: drm: rip out a few unused DRIVER flags The gma500 driver somehow set the DRIVER_IRQ_VBL flag, but since there's no code at all to check for this we can kill it. The other two are completely unused. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/gma500/psb_drv.c | 2 +- include/drm/drmP.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 99b5293972c6..1383e75acf25 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -627,7 +627,7 @@ static const struct file_operations psb_gem_fops = { static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ - DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM , + DRIVER_MODESET | DRIVER_GEM , .load = psb_driver_load, .unload = psb_driver_unload, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 922e426b3391..0dc64fca3ebb 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -145,9 +145,6 @@ int drm_err(const char *func, const char *format, ...); #define DRIVER_HAVE_DMA 0x20 #define DRIVER_HAVE_IRQ 0x40 #define DRIVER_IRQ_SHARED 0x80 -#define DRIVER_IRQ_VBL 0x100 -#define DRIVER_DMA_QUEUE 0x200 -#define DRIVER_IRQ_VBL2 0x800 #define DRIVER_GEM 0x1000 #define DRIVER_MODESET 0x2000 #define DRIVER_PRIME 0x4000 -- cgit v1.2.3 From fac3eaffb1139156099b6111337525d47e65bca6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:26 +0200 Subject: drm: remove a bunch of unused #defines from drmP.h Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- include/drm/drmP.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0dc64fca3ebb..6a0918e7db65 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -163,13 +163,7 @@ int drm_err(const char *func, const char *format, ...); #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ #define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ -#define DRM_LOOPING_LIMIT 5000000 -#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ -#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ -#define DRM_FLAG_DEBUG 0x01 - -#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAP_HASH_OFFSET 0x10000000 /*@}*/ @@ -258,9 +252,6 @@ int drm_err(const char *func, const char *format, ...); #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) -#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) -#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) - #define DRM_IF_VERSION(maj, min) (maj << 16 | min) /** -- cgit v1.2.3 From d678959f0a16da74beaa7e7c2fbb943ebd08a1d3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:30 +0200 Subject: drm/memory: don't export agp helpers They're only used by the agpgart support code in drm_agpgart.c, not by any drivers. I think long-term we should create a drm_internal.h include file with all the various functions only used by the drm core and not exported to drivers, and remove them from drmP.h. Oh, and someone should kill that upper-case P sometimes ;-) But that's all stuff for future patch bombs. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_memory.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 126d50ea181f..64e44fad8ae8 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages) { agp_free_memory(handle); } -EXPORT_SYMBOL(drm_free_agp); /** Wrapper around agp_bind_memory() */ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) @@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) { return agp_unbind_memory(handle); } -EXPORT_SYMBOL(drm_unbind_agp); #else /* __OS_HAS_AGP */ static inline void *agp_remap(unsigned long offset, unsigned long size, -- cgit v1.2.3 From 719524df4a2e48fa7ca3ad1697fd9a7f85ec8ad3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:31 +0200 Subject: drm: hollow-out GET_CLIENT ioctl We not only have debugfs files to do pretty much the equivalent of lsof, we also have an ioctl. Note that compared to lsof this dumps a wee bit more information, but we can still get at that from debugfs easily. I've dug around in mesa, libdrm and ddx histories and the only users seem to be drm/tests/dristat.c and drm/tests/getclients.c. The latter is a testcase for the ioctl itself since up to commit b018fcdaa5e8b4eabb8cffda687d00004a3c4785 Author: Eric Anholt Date: Thu Nov 22 18:46:54 2007 +1000 drm: Make DRM_IOCTL_GET_CLIENT return EINVAL when it can't find client #idx there was actually no way at all for userspace to enumerate all clients since the kernel just wouldn't tell it when to stop. Which completely broke its only user, dristat -c. So obviously that ioctl wasn't much use for debugging. Hence I don't see any point in keeping support for a tool which was pretty obviously never really used, especially while we have good replacements in the form of equivalent debugfs files. Still, to keep dristat -c from looping forever again, stop it early by returning an unconditional -EINVAL. Also add a comment in the code about why. v2: Slightly less hollowed-out implementation. libva uses GET_CLIENTS to figure out whether the fd it has is already authenticated or not. So we need to keep that part of things working. Simplest way is to just return one entry to keep va_drm_is_authenticated in libva/va/drm/va_drm_auth.c working. This is exercised by igt/drm_get_client_auth which contains a copypasta of the libva auth check code.
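For context, the libva-style authentication probe that has to keep working amounts to something like the userspace sketch below. This is illustrative only: the helper name is made up, the header path depends on where the drm uapi headers are installed, and error handling is minimal. With the hollowed-out implementation in the diff that follows, asking for client slot 0 simply reflects the caller's own authenticated flag, and every other index gets -EINVAL.

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>    /* struct drm_client, DRM_IOCTL_GET_CLIENT */

/* Ask the kernel about "client 0" and look only at the auth flag. */
static bool foo_drm_fd_is_authenticated(int fd)
{
        struct drm_client client;

        memset(&client, 0, sizeof(client));
        client.idx = 0;

        if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client) < 0)
                return false;

        return client.auth != 0;
}
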
Cc: Gwenole Beauchesne Cc: David Herrmann Reviewed-by: David Herrmann Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_ioctl.c | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 0acf0807d1ad..ac8ca5ce0822 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_client *client = data; - struct drm_file *pt; - int idx; - int i; - - idx = client->idx; - i = 0; - mutex_lock(&dev->struct_mutex); - list_for_each_entry(pt, &dev->filelist, lhead) { - if (i++ >= idx) { - client->auth = pt->authenticated; - client->pid = pid_vnr(pt->pid); - client->uid = from_kuid_munged(current_user_ns(), pt->uid); - client->magic = pt->magic; - client->iocs = pt->ioctl_count; - mutex_unlock(&dev->struct_mutex); - - return 0; - } + /* + * Hollowed-out getclient ioctl to keep some dead old drm tests/tools + * not breaking completely. Userspace tools stop enumerating one they + * get -EINVAL, hence this is the return value we need to hand back for + * no clients tracked. + * + * Unfortunately some clients (*cough* libva *cough*) use this in a fun + * attempt to figure out whether they're authenticated or not. Since + * that's the only thing they care about, give it to the directly + * instead of walking one giant list. + */ + if (client->idx == 0) { + client->auth = file_priv->authenticated; + client->pid = pid_vnr(file_priv->pid); + client->uid = from_kuid_munged(current_user_ns(), + file_priv->uid); + client->magic = 0; + client->iocs = 0; + + return 0; + } else { + return -EINVAL; } - mutex_unlock(&dev->struct_mutex); - - return -EINVAL; } /** -- cgit v1.2.3 From d79cdc8312689b39c6d83718c1c196af4b3cd18c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:32 +0200 Subject: drm: no-op out GET_STATS ioctl Again only used by a tests in libdrm and by dristat. Nowadays we have much better tracing tools to get detailed insights into what a drm driver is doing. And for a simple "does it work" kind of question that these stats could answer we have plenty of dmesg debug log spew. So I don't see any use for this stat gathering complexity at all. To be able to gradually drop things start with ripping out the interfaces to it, here the ioctl. To prevent dristat from eating its own stack garbage we can't use the drm_noop ioctl though, since we need to clear the return data with a memset. Cc: Eric Anholt Signed-off-by: Daniel Vetter Reviewed-by: Eric Anholt Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_ioctl.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ac8ca5ce0822..cffc7c0e1171 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -257,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_stats *stats = data; - int i; + /* Clear stats to prevent userspace from eating its stack garbage. */ memset(stats, 0, sizeof(*stats)); - for (i = 0; i < dev->counters; i++) { - if (dev->types[i] == _DRM_STAT_LOCK) - stats->data[i].value = - (file_priv->master->lock.hw_lock ? 
file_priv->master->lock.hw_lock->lock : 0); - else - stats->data[i].value = atomic_read(&dev->counts[i]); - stats->data[i].type = dev->types[i]; - } - - stats->count = dev->counters; - return 0; } -- cgit v1.2.3 From b17df86ece5e674c6f50b9a219067b9a80ea9dd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Marchesin?= Date: Tue, 13 Aug 2013 11:55:12 -0700 Subject: drm: Remove drm_mode_validate_clocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function is unused. Signed-off-by: Stéphane Marchesin Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 37 ------------------------------------- include/drm/drm_crtc.h | 3 --- 2 files changed, 40 deletions(-) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index a6729bfe6860..504a602f495c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -922,43 +922,6 @@ void drm_mode_validate_size(struct drm_device *dev, } EXPORT_SYMBOL(drm_mode_validate_size); -/** - * drm_mode_validate_clocks - validate modes against clock limits - * @dev: DRM device - * @mode_list: list of modes to check - * @min: minimum clock rate array - * @max: maximum clock rate array - * @n_ranges: number of clock ranges (size of arrays) - * - * LOCKING: - * Caller must hold a lock protecting @mode_list. - * - * Some code may need to check a mode list against the clock limits of the - * device in question. This function walks the mode list, testing to make - * sure each mode falls within a given range (defined by @min and @max - * arrays) and sets @mode->status as needed. - */ -void drm_mode_validate_clocks(struct drm_device *dev, - struct list_head *mode_list, - int *min, int *max, int n_ranges) -{ - struct drm_display_mode *mode; - int i; - - list_for_each_entry(mode, mode_list, head) { - bool good = false; - for (i = 0; i < n_ranges; i++) { - if (mode->clock >= min[i] && mode->clock <= max[i]) { - good = true; - break; - } - } - if (!good) - mode->status = MODE_CLOCK_RANGE; - } -} -EXPORT_SYMBOL(drm_mode_validate_clocks); - /** * drm_mode_prune_invalid - remove invalid modes from mode list * @dev: DRM device diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index fa12a2fa4293..32e0820357e6 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -930,9 +930,6 @@ extern void drm_mode_list_concat(struct list_head *head, extern void drm_mode_validate_size(struct drm_device *dev, struct list_head *mode_list, int maxX, int maxY, int maxPitch); -extern void drm_mode_validate_clocks(struct drm_device *dev, - struct list_head *mode_list, - int *min, int *max, int n_ranges); extern void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); extern void drm_mode_sort(struct list_head *mode_list); -- cgit v1.2.3 From cabaafc78935521c5abc7ec72278dbaa5400c995 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 14:41:54 -0400 Subject: drm: add flip-work helper A small helper to queue up work to do, from workqueue context, after a flip. Typically useful to defer unreffing buffers that may be read by the display controller until vblank. 
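A rough usage sketch of the helper follows; it is not part of this patch, the foo_* names, the struct layout and the queue depth of 16 are invented for illustration, but the calls are the ones introduced below, and the tilcdc and omapdrm conversions later in this series follow the same pattern:

#include <drm/drmP.h>
#include <drm/drm_flip_work.h>

/* Hypothetical driver state: only what the helper needs. */
struct foo_crtc {
        struct drm_crtc base;
        struct drm_flip_work unref_work;
        struct workqueue_struct *wq;
};

/* Called later, once per queued value, from workqueue context. */
static void foo_unref_worker(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_unreference(val);
}

static int foo_crtc_setup(struct foo_crtc *foo)
{
        /* Allow up to 16 framebuffers to be pending at once. */
        return drm_flip_work_init(&foo->unref_work, 16, "unref", foo_unref_worker);
}

/* At flip time: defer the unref of the outgoing framebuffer. */
static void foo_crtc_page_flip(struct foo_crtc *foo, struct drm_framebuffer *old_fb)
{
        drm_flip_work_queue(&foo->unref_work, old_fb);
}

/* From the vblank irq: push everything queued so far to the workqueue. */
static void foo_crtc_vblank_irq(struct foo_crtc *foo)
{
        drm_flip_work_commit(&foo->unref_work, foo->wq);
}

Splitting queue from commit lets the cheap, lockless enqueue happen at flip time while the potentially sleeping unref runs later from process context.
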
v1: original v2: wire up docbook + couple docbook fixes Signed-off-by: Rob Clark Acked-by: Daniel Vetter Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 6 ++ drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_flip_work.c | 124 ++++++++++++++++++++++++++++++++++++++++ include/drm/drm_flip_work.h | 76 ++++++++++++++++++++++++ 4 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/drm_flip_work.c create mode 100644 include/drm/drm_flip_work.h diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 9494ab8af9e7..f97d08ba59be 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -2211,6 +2211,12 @@ void intel_crt_init(struct drm_device *dev) !Pinclude/drm/drm_rect.h rect utils !Iinclude/drm/drm_rect.h !Edrivers/gpu/drm/drm_rect.c + + + Flip-work Helper Reference +!Pinclude/drm/drm_flip_work.h flip utils +!Iinclude/drm/drm_flip_work.h +!Edrivers/gpu/drm/drm_flip_work.c VMA Offset Manager diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index d943b94afc90..2aaf082368a2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -13,7 +13,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_crtc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ - drm_rect.o drm_vma_manager.o + drm_rect.o drm_vma_manager.o drm_flip_work.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c new file mode 100644 index 000000000000..e788882d9021 --- /dev/null +++ b/drivers/gpu/drm/drm_flip_work.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "drmP.h" +#include "drm_flip_work.h" + +/** + * drm_flip_work_queue - queue work + * @work: the flip-work + * @val: the value to queue + * + * Queues work, that will later be run (passed back to drm_flip_func_t + * func) on a work queue after drm_flip_work_commit() is called. 
+ */ +void drm_flip_work_queue(struct drm_flip_work *work, void *val) +{ + if (kfifo_put(&work->fifo, (const void **)&val)) { + atomic_inc(&work->pending); + } else { + DRM_ERROR("%s fifo full!\n", work->name); + work->func(work, val); + } +} +EXPORT_SYMBOL(drm_flip_work_queue); + +/** + * drm_flip_work_commit - commit queued work + * @work: the flip-work + * @wq: the work-queue to run the queued work on + * + * Trigger work previously queued by drm_flip_work_queue() to run + * on a workqueue. The typical usage would be to queue work (via + * drm_flip_work_queue()) at any point (from vblank irq and/or + * prior), and then from vblank irq commit the queued work. + */ +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq) +{ + uint32_t pending = atomic_read(&work->pending); + atomic_add(pending, &work->count); + atomic_sub(pending, &work->pending); + queue_work(wq, &work->worker); +} +EXPORT_SYMBOL(drm_flip_work_commit); + +static void flip_worker(struct work_struct *w) +{ + struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); + uint32_t count = atomic_read(&work->count); + void *val = NULL; + + atomic_sub(count, &work->count); + + while(count--) + if (!WARN_ON(!kfifo_get(&work->fifo, &val))) + work->func(work, val); +} + +/** + * drm_flip_work_init - initialize flip-work + * @work: the flip-work to initialize + * @size: the max queue depth + * @name: debug name + * @func: the callback work function + * + * Initializes/allocates resources for the flip-work + * + * RETURNS: + * Zero on success, error code on failure. + */ +int drm_flip_work_init(struct drm_flip_work *work, int size, + const char *name, drm_flip_func_t func) +{ + int ret; + + work->name = name; + atomic_set(&work->count, 0); + atomic_set(&work->pending, 0); + work->func = func; + + ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL); + if (ret) { + DRM_ERROR("could not allocate %s fifo\n", name); + return ret; + } + + INIT_WORK(&work->worker, flip_worker); + + return 0; +} +EXPORT_SYMBOL(drm_flip_work_init); + +/** + * drm_flip_work_cleanup - cleans up flip-work + * @work: the flip-work to cleanup + * + * Destroy resources allocated for the flip-work + */ +void drm_flip_work_cleanup(struct drm_flip_work *work) +{ + WARN_ON(!kfifo_is_empty(&work->fifo)); + kfifo_free(&work->fifo); +} +EXPORT_SYMBOL(drm_flip_work_cleanup); diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h new file mode 100644 index 000000000000..35c776ae7d3b --- /dev/null +++ b/include/drm/drm_flip_work.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_FLIP_WORK_H +#define DRM_FLIP_WORK_H + +#include +#include + +/** + * DOC: flip utils + * + * Util to queue up work to run from work-queue context after flip/vblank. + * Typically this can be used to defer unref of framebuffer's, cursor + * bo's, etc until after vblank. The APIs are all safe (and lockless) + * for up to one producer and once consumer at a time. The single-consumer + * aspect is ensured by committing the queued work to a single work-queue. + */ + +struct drm_flip_work; + +/* + * drm_flip_func_t - callback function + * + * @work: the flip work + * @val: value queued via drm_flip_work_queue() + * + * Callback function to be called for each of the queue'd work items after + * drm_flip_work_commit() is called. + */ +typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); + +/** + * struct drm_flip_work - flip work queue + * @name: debug name + * @pending: number of queued but not committed items + * @count: number of committed items + * @func: callback fxn called for each committed item + * @worker: worker which calls @func + */ +struct drm_flip_work { + const char *name; + atomic_t pending, count; + drm_flip_func_t func; + struct work_struct worker; + DECLARE_KFIFO_PTR(fifo, void *); +}; + +void drm_flip_work_queue(struct drm_flip_work *work, void *val); +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq); +int drm_flip_work_init(struct drm_flip_work *work, int size, + const char *name, drm_flip_func_t func); +void drm_flip_work_cleanup(struct drm_flip_work *work); + +#endif /* DRM_FLIP_WORK_H */ -- cgit v1.2.3 From a464d618c715b7a850f7459754d9d155f5e60538 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:20 -0400 Subject: drm/tilcdc: use flip-work helper Signed-off-by: Rob Clark Tested-by: Darren Etheridge Signed-off-by: Dave Airlie --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 6d0524095fe3..fe4726628906 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -15,7 +15,7 @@ * this program. If not, see . 
*/ -#include +#include "drm_flip_work.h" #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -35,21 +35,18 @@ struct tilcdc_crtc { struct drm_framebuffer *scanout[2]; /* for deferred fb unref's: */ - DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *); - struct work_struct work; + struct drm_flip_work unref_work; }; #define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base) -static void unref_worker(struct work_struct *work) +static void unref_worker(struct drm_flip_work *work, void *val) { struct tilcdc_crtc *tilcdc_crtc = - container_of(work, struct tilcdc_crtc, work); + container_of(work, struct tilcdc_crtc, unref_work); struct drm_device *dev = tilcdc_crtc->base.dev; - struct drm_framebuffer *fb; mutex_lock(&dev->mode_config.mutex); - while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb)) - drm_framebuffer_unreference(fb); + drm_framebuffer_unreference(val); mutex_unlock(&dev->mode_config.mutex); } @@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n) }; struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; + struct tilcdc_drm_private *priv = dev->dev_private; pm_runtime_get_sync(dev->dev); tilcdc_write(dev, base_reg[n], tilcdc_crtc->start); tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end); if (tilcdc_crtc->scanout[n]) { - if (kfifo_put(&tilcdc_crtc->unref_fifo, - (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) { - struct tilcdc_drm_private *priv = dev->dev_private; - queue_work(priv->wq, &tilcdc_crtc->work); - } else { - dev_err(dev->dev, "unref fifo full!\n"); - drm_framebuffer_unreference(tilcdc_crtc->scanout[n]); - } + drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]); + drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); } tilcdc_crtc->scanout[n] = crtc->fb; drm_framebuffer_reference(tilcdc_crtc->scanout[n]); @@ -149,8 +141,8 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc) WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON); drm_crtc_cleanup(crtc); - WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo)); - kfifo_free(&tilcdc_crtc->unref_fifo); + drm_flip_work_cleanup(&tilcdc_crtc->unref_work); + kfree(tilcdc_crtc); } @@ -671,14 +663,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev) tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; init_waitqueue_head(&tilcdc_crtc->frame_done_wq); - ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL); + ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16, + "unref", unref_worker); if (ret) { dev_err(dev->dev, "could not allocate unref FIFO\n"); goto fail; } - INIT_WORK(&tilcdc_crtc->work, unref_worker); - ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); if (ret < 0) goto fail; -- cgit v1.2.3 From 5833bd2fe1c7d9e2650a11a8785b848bbd0d0188 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:21 -0400 Subject: drm/omap: use flip-work helper And simplify how we hold a ref+pin to what is being scanned out by using fb refcnt'ing. The previous logic pre-dated fb refcnt, and as a result was less straightforward than it could have been. By holding a ref to the fb, we don't have to care about how many planes there are or hold a ref to each color plane's bo.
Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/omapdrm/omap_drv.h | 5 +-- drivers/gpu/drm/omapdrm/omap_fb.c | 74 +++++++++++++++++------------------- drivers/gpu/drm/omapdrm/omap_plane.c | 51 +++++++++++-------------- 3 files changed, 58 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index f2ba425d80dd..30b95b736658 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); -int omap_framebuffer_replace(struct drm_framebuffer *a, - struct drm_framebuffer *b, void *arg, - void (*unpin)(void *arg, struct drm_gem_object *bo)); +int omap_framebuffer_pin(struct drm_framebuffer *fb); +int omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, struct omap_drm_window *win, struct omap_overlay_info *info); struct drm_connector *omap_framebuffer_get_next_connector( diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 8031402e7951..f2b8f0668c0c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, } } -/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although - * buffers to unpin are just pushed to the unpin fifo so that the - * caller can defer unpin until vblank. - * - * Note if this fails (ie. something went very wrong!), all buffers are - * unpinned, and the caller disables the overlay. We could have tried - * to revert back to the previous set of pinned buffers but if things are - * hosed there is no guarantee that would succeed. - */ -int omap_framebuffer_replace(struct drm_framebuffer *a, - struct drm_framebuffer *b, void *arg, - void (*unpin)(void *arg, struct drm_gem_object *bo)) +/* pin, prepare for scanout: */ +int omap_framebuffer_pin(struct drm_framebuffer *fb) { - int ret = 0, i, na, nb; - struct omap_framebuffer *ofba = to_omap_framebuffer(a); - struct omap_framebuffer *ofbb = to_omap_framebuffer(b); - uint32_t pinned_mask = 0; + struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); + int ret, i, n = drm_format_num_planes(fb->pixel_format); - na = a ? drm_format_num_planes(a->pixel_format) : 0; - nb = b ? drm_format_num_planes(b->pixel_format) : 0; + for (i = 0; i < n; i++) { + struct plane *plane = &omap_fb->planes[i]; + ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true); + if (ret) + goto fail; + omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE); + } - for (i = 0; i < max(na, nb); i++) { - struct plane *pa, *pb; + return 0; - pa = (i < na) ? &ofba->planes[i] : NULL; - pb = (i < nb) ? 
&ofbb->planes[i] : NULL; +fail: + for (i--; i >= 0; i--) { + struct plane *plane = &omap_fb->planes[i]; + omap_gem_put_paddr(plane->bo); + plane->paddr = 0; + } - if (pa) - unpin(arg, pa->bo); + return ret; +} - if (pb && !ret) { - ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true); - if (!ret) { - omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE); - pinned_mask |= (1 << i); - } - } - } +/* unpin, no longer being scanned out: */ +int omap_framebuffer_unpin(struct drm_framebuffer *fb) +{ + struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); + int ret, i, n = drm_format_num_planes(fb->pixel_format); - if (ret) { - /* something went wrong.. unpin what has been pinned */ - for (i = 0; i < nb; i++) { - if (pinned_mask & (1 << i)) { - struct plane *pb = &ofba->planes[i]; - unpin(arg, pb->bo); - } - } + for (i = 0; i < n; i++) { + struct plane *plane = &omap_fb->planes[i]; + ret = omap_gem_put_paddr(plane->bo); + if (ret) + goto fail; + plane->paddr = 0; } + return 0; + +fail: return ret; } diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 8d225d7ff4e3..046d5e660c04 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -17,7 +17,7 @@ * this program. If not, see . */ -#include +#include "drm_flip_work.h" #include "omap_drv.h" #include "omap_dmm_tiler.h" @@ -58,26 +58,23 @@ struct omap_plane { struct omap_drm_irq error_irq; - /* set of bo's pending unpin until next post_apply() */ - DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *); + /* for deferring bo unpin's until next post_apply(): */ + struct drm_flip_work unpin_work; // XXX maybe get rid of this and handle vblank in crtc too? struct callback apply_done_cb; }; -static void unpin(void *arg, struct drm_gem_object *bo) +static void unpin_worker(struct drm_flip_work *work, void *val) { - struct drm_plane *plane = arg; - struct omap_plane *omap_plane = to_omap_plane(plane); + struct omap_plane *omap_plane = + container_of(work, struct omap_plane, unpin_work); + struct drm_device *dev = omap_plane->base.dev; - if (kfifo_put(&omap_plane->unpin_fifo, - (const struct drm_gem_object **)&bo)) { - /* also hold a ref so it isn't free'd while pinned */ - drm_gem_object_reference(bo); - } else { - dev_err(plane->dev->dev, "unpin fifo full!\n"); - omap_gem_put_paddr(bo); - } + omap_framebuffer_unpin(val); + mutex_lock(&dev->mode_config.mutex); + drm_framebuffer_unreference(val); + mutex_unlock(&dev->mode_config.mutex); } /* update which fb (if any) is pinned for scanout */ @@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb) struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; if (pinned_fb != fb) { - int ret; + int ret = 0; DBG("%p -> %p", pinned_fb, fb); - if (fb) + if (fb) { drm_framebuffer_reference(fb); - - ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin); + ret = omap_framebuffer_pin(fb); + } if (pinned_fb) - drm_framebuffer_unreference(pinned_fb); + drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb); if (ret) { dev_err(plane->dev->dev, "could not swap %p -> %p\n", omap_plane->pinned_fb, fb); - if (fb) - drm_framebuffer_unreference(fb); + drm_framebuffer_unreference(fb); omap_plane->pinned_fb = NULL; return ret; } @@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply) struct omap_plane *omap_plane = container_of(apply, struct omap_plane, apply); struct drm_plane *plane = &omap_plane->base; + struct omap_drm_private *priv = plane->dev->dev_private; struct 
omap_overlay_info *info = &omap_plane->info; - struct drm_gem_object *bo = NULL; struct callback cb; cb = omap_plane->apply_done_cb; omap_plane->apply_done_cb.fxn = NULL; - while (kfifo_get(&omap_plane->unpin_fifo, &bo)) { - omap_gem_put_paddr(bo); - drm_gem_object_unreference_unlocked(bo); - } + drm_flip_work_commit(&omap_plane->unpin_work, priv->wq); if (cb.fxn) cb.fxn(cb.arg); @@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane) omap_plane_disable(plane); drm_plane_cleanup(plane); - WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo)); - kfifo_free(&omap_plane->unpin_fifo); + drm_flip_work_cleanup(&omap_plane->unpin_work); kfree(omap_plane); } @@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, if (!omap_plane) goto fail; - ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL); + ret = drm_flip_work_init(&omap_plane->unpin_work, 16, + "unpin", unpin_worker); if (ret) { dev_err(dev->dev, "could not allocate unpin FIFO\n"); goto fail; -- cgit v1.2.3 From 367bbd49202dd256dce1217c2f7cd0d5d1916f7b Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:23 -0400 Subject: drm/gem: add drm_gem_create_mmap_offset_size() Variant of drm_gem_create_mmap_offset() which doesn't make the assumption that virtual size and physical size (obj->size) are the same. This is needed in omapdrm to deal with tiled buffers. And lets us get rid of a duplicated and slightly modified version of drm_gem_create_mmap_offset() in omapdrm. Signed-off-by: Rob Clark Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 28 ++++++++++++++++++++++++---- include/drm/drmP.h | 1 + 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 9ab038c8dd5f..a8ba7da83d45 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -317,24 +317,44 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj) EXPORT_SYMBOL(drm_gem_free_mmap_offset); /** - * drm_gem_create_mmap_offset - create a fake mmap offset for an object + * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object * @obj: obj in question + * @size: the virtual size * * GEM memory mapping works by handing back to userspace a fake mmap offset * it can use in a subsequent mmap(2) call. The DRM core code then looks * up the object based on the offset and sets up the various memory mapping * structures. * - * This routine allocates and attaches a fake offset for @obj. + * This routine allocates and attaches a fake offset for @obj, in cases where + * the virtual size differs from the physical size (ie. obj->size). Otherwise + * just use drm_gem_create_mmap_offset(). */ int -drm_gem_create_mmap_offset(struct drm_gem_object *obj) +drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node, - obj->size / PAGE_SIZE); + size / PAGE_SIZE); +} +EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); + +/** + * drm_gem_create_mmap_offset - create a fake mmap offset for an object + * @obj: obj in question + * + * GEM memory mapping works by handing back to userspace a fake mmap offset + * it can use in a subsequent mmap(2) call. The DRM core code then looks + * up the object based on the offset and sets up the various memory mapping + * structures. + * + * This routine allocates and attaches a fake offset for @obj. 
+ */ +int drm_gem_create_mmap_offset(struct drm_gem_object *obj) +{ + return drm_gem_create_mmap_offset_size(obj, obj->size); } EXPORT_SYMBOL(drm_gem_create_mmap_offset); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 6a0918e7db65..d6d9a28fc6b4 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1614,6 +1614,7 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, -- cgit v1.2.3 From bcc5c9d50e93bb7d949f6f38063b62dd35ca84d1 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:24 -0400 Subject: drm/gem: add shmem get/put page helpers Basically just extracting some code duplicated in gma500, omapdrm, udl, and upcoming msm driver. Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drmP.h | 4 ++ 2 files changed, 107 insertions(+) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a8ba7da83d45..a4c8e8fba599 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -358,6 +358,109 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_create_mmap_offset); +/** + * drm_gem_get_pages - helper to allocate backing pages for a GEM object + * from shmem + * @obj: obj in question + * @gfpmask: gfp mask of requested pages + */ +struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) +{ + struct inode *inode; + struct address_space *mapping; + struct page *p, **pages; + int i, npages; + + /* This is the shared memory object that backs the GEM resource */ + inode = file_inode(obj->filp); + mapping = inode->i_mapping; + + /* We already BUG_ON() for non-page-aligned sizes in + * drm_gem_object_init(), so we should never hit this unless + * driver author is doing something really wrong: + */ + WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); + + npages = obj->size >> PAGE_SHIFT; + + pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (pages == NULL) + return ERR_PTR(-ENOMEM); + + gfpmask |= mapping_gfp_mask(mapping); + + for (i = 0; i < npages; i++) { + p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + if (IS_ERR(p)) + goto fail; + pages[i] = p; + + /* There is a hypothetical issue w/ drivers that require + * buffer memory in the low 4GB.. if the pages are un- + * pinned, and swapped out, they can end up swapped back + * in above 4GB. If pages are already in memory, then + * shmem_read_mapping_page_gfp will ignore the gfpmask, + * even if the already in-memory page disobeys the mask. + * + * It is only a theoretical issue today, because none of + * the devices with this limitation can be populated with + * enough memory to trigger the issue. But this BUG_ON() + * is here as a reminder in case the problem with + * shmem_read_mapping_page_gfp() isn't solved by the time + * it does become a real issue. 
+ * + * See this thread: http://lkml.org/lkml/2011/7/11/238 + */ + BUG_ON((gfpmask & __GFP_DMA32) && + (page_to_pfn(p) >= 0x00100000UL)); + } + + return pages; + +fail: + while (i--) + page_cache_release(pages[i]); + + drm_free_large(pages); + return ERR_CAST(p); +} +EXPORT_SYMBOL(drm_gem_get_pages); + +/** + * drm_gem_put_pages - helper to free backing pages for a GEM object + * @obj: obj in question + * @pages: pages to free + * @dirty: if true, pages will be marked as dirty + * @accessed: if true, the pages will be marked as accessed + */ +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed) +{ + int i, npages; + + /* We already BUG_ON() for non-page-aligned sizes in + * drm_gem_object_init(), so we should never hit this unless + * driver author is doing something really wrong: + */ + WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); + + npages = obj->size >> PAGE_SHIFT; + + for (i = 0; i < npages; i++) { + if (dirty) + set_page_dirty(pages[i]); + + if (accessed) + mark_page_accessed(pages[i]); + + /* Undo the reference we took when populating the table */ + page_cache_release(pages[i]); + } + + drm_free_large(pages); +} +EXPORT_SYMBOL(drm_gem_put_pages); + /** Returns a reference to the object named by the handle. */ struct drm_gem_object * drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d6d9a28fc6b4..91f343c8b160 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1616,6 +1616,10 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); +struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed); + struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, u32 handle); -- cgit v1.2.3 From 8b9ba7a38c4916e67f43223a7973374e7c44e5cb Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:25 -0400 Subject: drm/gma500: use gem get/put page helpers Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/gma500/gtt.c | 38 ++++++-------------------------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 1f82183536a3..92babac362ec 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll) */ static int psb_gtt_attach_pages(struct gtt_range *gt) { - struct inode *inode; - struct address_space *mapping; - int i; - struct page *p; - int pages = gt->gem.size / PAGE_SIZE; + struct page **pages; WARN_ON(gt->pages); - /* This is the shared memory object that backs the GEM resource */ - inode = file_inode(gt->gem.filp); - mapping = inode->i_mapping; + pages = drm_gem_get_pages(>->gem, 0); + if (IS_ERR(pages)) + return PTR_ERR(pages); - gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL); - if (gt->pages == NULL) - return -ENOMEM; - gt->npage = pages; + gt->pages = pages; - for (i = 0; i < pages; i++) { - p = shmem_read_mapping_page(mapping, i); - if (IS_ERR(p)) - goto err; - gt->pages[i] = p; - } return 0; - -err: - while (i--) - page_cache_release(gt->pages[i]); - kfree(gt->pages); - gt->pages = NULL; - return PTR_ERR(p); } /** @@ -240,13 +220,7 @@ 
err: */ static void psb_gtt_detach_pages(struct gtt_range *gt) { - int i; - for (i = 0; i < gt->npage; i++) { - /* FIXME: do we need to force dirty */ - set_page_dirty(gt->pages[i]); - page_cache_release(gt->pages[i]); - } - kfree(gt->pages); + drm_gem_put_pages(>->gem, gt->pages, true, false); gt->pages = NULL; } -- cgit v1.2.3 From 5dc9e1e87229cb786a5bb58ddd0d60fee6eb4641 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:26 -0400 Subject: drm/udl: use gem get/put page helpers Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/udl/udl_gem.c | 44 ++++++------------------------------------- 1 file changed, 6 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index b5e3b8038253..8dbe9d0ae9a7 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -117,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj) static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) { - int page_count, i; - struct page *page; - struct inode *inode; - struct address_space *mapping; + struct page **pages; if (obj->pages) return 0; - page_count = obj->base.size / PAGE_SIZE; - BUG_ON(obj->pages != NULL); - obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); - if (obj->pages == NULL) - return -ENOMEM; - - inode = file_inode(obj->base.filp); - mapping = inode->i_mapping; - gfpmask |= mapping_gfp_mask(mapping); + pages = drm_gem_get_pages(&obj->base, gfpmask); + if (IS_ERR(pages)) + return PTR_ERR(pages); - for (i = 0; i < page_count; i++) { - page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); - if (IS_ERR(page)) - goto err_pages; - obj->pages[i] = page; - } + obj->pages = pages; return 0; -err_pages: - while (i--) - page_cache_release(obj->pages[i]); - drm_free_large(obj->pages); - obj->pages = NULL; - return PTR_ERR(page); } static void udl_gem_put_pages(struct udl_gem_object *obj) { - int page_count = obj->base.size / PAGE_SIZE; - int i; - - if (obj->base.import_attach) { - drm_free_large(obj->pages); - obj->pages = NULL; - return; - } - - for (i = 0; i < page_count; i++) - page_cache_release(obj->pages[i]); - - drm_free_large(obj->pages); + drm_gem_put_pages(&obj->base, obj->pages, false, false); obj->pages = NULL; } -- cgit v1.2.3 From ddcd09d62bd503bfd33291348b7cd8ad32d413fd Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Aug 2013 13:41:27 -0400 Subject: drm/omap: kill omap_gem_helpers.c Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/omapdrm/Makefile | 3 - drivers/gpu/drm/omapdrm/omap_gem.c | 8 +- drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 124 ----------------------------- 3 files changed, 4 insertions(+), 131 deletions(-) delete mode 100644 drivers/gpu/drm/omapdrm/omap_gem_helpers.c diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile index d85e058f2845..778372b062ad 100644 --- a/drivers/gpu/drm/omapdrm/Makefile +++ b/drivers/gpu/drm/omapdrm/Makefile @@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \ omap_dmm_tiler.o \ tcm-sita.o -# temporary: -omapdrm-y += omap_gem_helpers.o - obj-$(CONFIG_DRM_OMAP) += omapdrm.o diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b1f19702550f..533f6ebec531 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -237,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. 
probably * we actually want CMA memory for it all anyways.. */ - pages = _drm_gem_get_pages(obj, GFP_KERNEL); + pages = drm_gem_get_pages(obj, GFP_KERNEL); if (IS_ERR(pages)) { dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); return PTR_ERR(pages); @@ -271,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) return 0; free_pages: - _drm_gem_put_pages(obj, pages, true, false); + drm_gem_put_pages(obj, pages, true, false); return ret; } @@ -295,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj) kfree(omap_obj->addrs); omap_obj->addrs = NULL; - _drm_gem_put_pages(obj, omap_obj->pages, true, false); + drm_gem_put_pages(obj, omap_obj->pages, true, false); omap_obj->pages = NULL; } @@ -316,7 +316,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) /* Make it mmapable */ size = omap_gem_mmap_size(obj); - ret = _drm_gem_create_mmap_offset_size(obj, size); + ret = drm_gem_create_mmap_offset_size(obj, size); if (ret) { dev_err(dev->dev, "could not allocate mmap offset\n"); return 0; diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c deleted file mode 100644 index dbb157542f8f..000000000000 --- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * drivers/gpu/drm/omapdrm/omap_gem_helpers.c - * - * Copyright (C) 2011 Texas Instruments - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -/* temporary copy of drm_gem_{get,put}_pages() until the - * "drm/gem: add functions to get/put pages" patch is merged.. - */ - -#include -#include -#include - -#include - -/** - * drm_gem_get_pages - helper to allocate backing pages for a GEM object - * @obj: obj in question - * @gfpmask: gfp mask of requested pages - */ -struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) -{ - struct inode *inode; - struct address_space *mapping; - struct page *p, **pages; - int i, npages; - - /* This is the shared memory object that backs the GEM resource */ - inode = file_inode(obj->filp); - mapping = inode->i_mapping; - - npages = obj->size >> PAGE_SHIFT; - - pages = drm_malloc_ab(npages, sizeof(struct page *)); - if (pages == NULL) - return ERR_PTR(-ENOMEM); - - gfpmask |= mapping_gfp_mask(mapping); - - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); - if (IS_ERR(p)) - goto fail; - pages[i] = p; - - /* There is a hypothetical issue w/ drivers that require - * buffer memory in the low 4GB.. if the pages are un- - * pinned, and swapped out, they can end up swapped back - * in above 4GB. If pages are already in memory, then - * shmem_read_mapping_page_gfp will ignore the gfpmask, - * even if the already in-memory page disobeys the mask. - * - * It is only a theoretical issue today, because none of - * the devices with this limitation can be populated with - * enough memory to trigger the issue. 
But this BUG_ON() - * is here as a reminder in case the problem with - * shmem_read_mapping_page_gfp() isn't solved by the time - * it does become a real issue. - * - * See this thread: http://lkml.org/lkml/2011/7/11/238 - */ - BUG_ON((gfpmask & __GFP_DMA32) && - (page_to_pfn(p) >= 0x00100000UL)); - } - - return pages; - -fail: - while (i--) - page_cache_release(pages[i]); - - drm_free_large(pages); - return ERR_CAST(p); -} - -/** - * drm_gem_put_pages - helper to free backing pages for a GEM object - * @obj: obj in question - * @pages: pages to free - */ -void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, - bool dirty, bool accessed) -{ - int i, npages; - - npages = obj->size >> PAGE_SHIFT; - - for (i = 0; i < npages; i++) { - if (dirty) - set_page_dirty(pages[i]); - - if (accessed) - mark_page_accessed(pages[i]); - - /* Undo the reference we took when populating the table */ - page_cache_release(pages[i]); - } - - drm_free_large(pages); -} - -int -_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) -{ - struct drm_device *dev = obj->dev; - struct drm_gem_mm *mm = dev->mm_private; - - return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node, - size / PAGE_SIZE); -} -- cgit v1.2.3 From b21e3afe2357c0f49348a5fb61247012bf8262ec Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Wed, 7 Aug 2013 22:34:48 -0400 Subject: drm: use ida to allocate connector ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it so that reloading a module does not cause all the connector ids to change, which are user-visible and sometimes used for configuration. Signed-off-by: Ilia Mirkin Reviewed-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 62 ++++++++++++++++++++++++++++++++-------------- drivers/gpu/drm/drm_drv.c | 2 ++ include/drm/drm_crtc.h | 2 ++ 3 files changed, 48 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fc83bb9eb514..a6917645fb4a 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -186,29 +186,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = { struct drm_conn_prop_enum_list { int type; const char *name; - int count; + struct ida ida; }; /* * Connector and encoder types. 
*/ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = -{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, - { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, - { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, - { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, - { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, - { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, - { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, - { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, - { DRM_MODE_CONNECTOR_Component, "Component", 0 }, - { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, - { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, - { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, - { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, - { DRM_MODE_CONNECTOR_TV, "TV", 0 }, - { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, - { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, +{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" }, + { DRM_MODE_CONNECTOR_VGA, "VGA" }, + { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, + { DRM_MODE_CONNECTOR_DVID, "DVI-D" }, + { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, + { DRM_MODE_CONNECTOR_Composite, "Composite" }, + { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" }, + { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, + { DRM_MODE_CONNECTOR_Component, "Component" }, + { DRM_MODE_CONNECTOR_9PinDIN, "DIN" }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, + { DRM_MODE_CONNECTOR_TV, "TV" }, + { DRM_MODE_CONNECTOR_eDP, "eDP" }, + { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, }; static const struct drm_prop_enum_list drm_encoder_enum_list[] = @@ -220,6 +220,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] = { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, }; +void drm_connector_ida_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_init(&drm_connector_enum_list[i].ida); +} + +void drm_connector_ida_destroy(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_destroy(&drm_connector_enum_list[i].ida); +} + const char *drm_get_encoder_name(const struct drm_encoder *encoder) { static char buf[32]; @@ -711,6 +727,8 @@ int drm_connector_init(struct drm_device *dev, int connector_type) { int ret; + struct ida *connector_ida = + &drm_connector_enum_list[connector_type].ida; drm_modeset_lock_all(dev); @@ -723,7 +741,12 @@ int drm_connector_init(struct drm_device *dev, connector->funcs = funcs; connector->connector_type = connector_type; connector->connector_type_id = - ++drm_connector_enum_list[connector_type].count; /* TODO */ + ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); + if (connector->connector_type_id < 0) { + ret = connector->connector_type_id; + drm_mode_object_put(dev, &connector->base); + goto out; + } INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; @@ -764,6 +787,9 @@ void drm_connector_cleanup(struct drm_connector *connector) list_for_each_entry_safe(mode, t, &connector->modes, head) drm_mode_remove(connector, mode); + ida_remove(&drm_connector_enum_list[connector->connector_type].ida, + connector->connector_type_id); + drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 5b949a736712..d97976cc51cd 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -226,6 +226,7 @@ static int __init drm_core_init(void) int ret = -ENOMEM; drm_global_init(); + drm_connector_ida_init(); idr_init(&drm_minors_idr); if (register_chrdev(DRM_MAJOR, "drm", 
&drm_stub_fops)) @@ -273,6 +274,7 @@ static void __exit drm_core_exit(void) unregister_chrdev(DRM_MAJOR, "drm"); + drm_connector_ida_destroy(); idr_destroy(&drm_minors_idr); } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 32e0820357e6..45f133228553 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -869,6 +869,8 @@ extern int drm_crtc_init(struct drm_device *dev, const struct drm_crtc_funcs *funcs); extern void drm_crtc_cleanup(struct drm_crtc *crtc); +extern void drm_connector_ida_init(void); +extern void drm_connector_ida_destroy(void); extern int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, -- cgit v1.2.3 From 2bc7b0ca8cc3bdcf61a7d4a99ed55c4ad084a4ae Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Tue, 13 Aug 2013 14:19:58 +0200 Subject: drm/host1x: stop casting VMA offsets to 32bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VMA offsets are 64bit so do not cast them to "unsigned int". Also remove the (now useless) offset-retrieval helper. The VMA manager provides simple enough helpers. Cc: Thierry Reding Cc: "Terje Bergström" Cc: Arto Merilainen Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/host1x/drm/drm.c | 2 +- drivers/gpu/host1x/drm/gem.c | 7 +------ drivers/gpu/host1x/drm/gem.h | 1 - 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c index 15684bf073fb..8c61ceeaa12d 100644 --- a/drivers/gpu/host1x/drm/drm.c +++ b/drivers/gpu/host1x/drm/drm.c @@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data, bo = to_tegra_bo(gem); - args->offset = tegra_bo_get_mmap_offset(bo); + args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); drm_gem_object_unreference(gem); diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c index 3c35622c9f15..59623de4ee15 100644 --- a/drivers/gpu/host1x/drm/gem.c +++ b/drivers/gpu/host1x/drm/gem.c @@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo) dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); } -unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo) -{ - return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node); -} - struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) { struct tegra_bo *bo; @@ -227,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, bo = to_tegra_bo(gem); - *offset = tegra_bo_get_mmap_offset(bo); + *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); drm_gem_object_unreference(gem); diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h index 2e93b0379da8..492533a2dacb 100644 --- a/drivers/gpu/host1x/drm/gem.h +++ b/drivers/gpu/host1x/drm/gem.h @@ -44,7 +44,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, unsigned int size, unsigned int *handle); void tegra_bo_free_object(struct drm_gem_object *gem); -unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo); int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args); int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, -- cgit v1.2.3 From c1d6798d20eed38b842eee01813ca6c48630d563 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:30 +0200 Subject: drm: use common drm_gem_dmabuf_release in i915/exynos drivers Note that this 
is slightly tricky since both drivers store their native objects in dma_buf->priv. But both also embed the base drm_gem_object at the first position, so the implicit cast is ok. To use the release helper we need to export it, too. Cc: Inki Dae Cc: Intel Graphics Development Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 3 ++- drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 23 +---------------------- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13 +------------ include/drm/drmP.h | 1 + 4 files changed, 5 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 85e450e3241c..a35f206bdc34 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -192,7 +192,7 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, /* nothing to be done here */ } -static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) +void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; @@ -202,6 +202,7 @@ static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) drm_gem_object_unreference_unlocked(obj); } } +EXPORT_SYMBOL(drm_gem_dmabuf_release); static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index a0f997e0cbdf..3cd56e16a0ef 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -127,27 +127,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, /* Nothing to do. */ } -static void exynos_dmabuf_release(struct dma_buf *dmabuf) -{ - struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv; - - /* - * exynos_dmabuf_release() call means that file object's - * f_count is 0 and it calls drm_gem_object_handle_unreference() - * to drop the references that these values had been increased - * at drm_prime_handle_to_fd() - */ - if (exynos_gem_obj->base.export_dma_buf == dmabuf) { - exynos_gem_obj->base.export_dma_buf = NULL; - - /* - * drop this gem object refcount to release allocated buffer - * and resources. 
- */ - drm_gem_object_unreference_unlocked(&exynos_gem_obj->base); - } -} - static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) { @@ -193,7 +172,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = { .kunmap = exynos_gem_dmabuf_kunmap, .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, .mmap = exynos_gem_dmabuf_mmap, - .release = exynos_dmabuf_release, + .release = drm_gem_dmabuf_release, }; struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index f2e185c9038f..63ee1a9f7978 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -90,17 +90,6 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, kfree(sg); } -static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) -{ - struct drm_i915_gem_object *obj = dma_buf->priv; - - if (obj->base.export_dma_buf == dma_buf) { - /* drop the reference on the export fd holds */ - obj->base.export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(&obj->base); - } -} - static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) { struct drm_i915_gem_object *obj = dma_buf->priv; @@ -211,7 +200,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, - .release = i915_gem_dmabuf_release, + .release = drm_gem_dmabuf_release, .kmap = i915_gem_dmabuf_kmap, .kmap_atomic = i915_gem_dmabuf_kmap_atomic, .kunmap = i915_gem_dmabuf_kunmap, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 91f343c8b160..5ebed3ea10ba 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1478,6 +1478,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); +extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From d0ed8d27faf0030d2659502ec0c47d1a8de3defa Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Thu, 15 Aug 2013 00:02:31 +0200 Subject: drm/exynos: explicit store base gem object in dma_buf->priv Signed-off-by: Inki Dae Signed-off-by: Kyungmin Park Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 3cd56e16a0ef..fd76449cf452 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -22,6 +22,11 @@ struct exynos_drm_dmabuf_attachment { bool is_mapped; }; +static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf) +{ + return to_exynos_gem_obj(buf->priv); +} + static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, struct device *dev, struct dma_buf_attachment *attach) @@ -63,7 +68,7 @@ static struct sg_table * enum dma_data_direction dir) { struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; - struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; + struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); struct drm_device *dev = gem_obj->base.dev; struct exynos_drm_gem_buf *buf; struct 
scatterlist *rd, *wr; @@ -180,7 +185,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, { struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, + return dma_buf_export(obj, &exynos_dmabuf_ops, exynos_gem_obj->base.size, flags); } @@ -198,8 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, if (dma_buf->ops == &exynos_dmabuf_ops) { struct drm_gem_object *obj; - exynos_gem_obj = dma_buf->priv; - obj = &exynos_gem_obj->base; + obj = dma_buf->priv; /* is it from our device? */ if (obj->dev == drm_dev) { -- cgit v1.2.3 From 01ce605a7bd8f4aaaf3c0accdaa5e106982b698c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:32 +0200 Subject: drm/prime: remove cargo-cult locking from map_sg helper I've checked both implementations (radeon/nouveau) and they both grab the page array from ttm simply by dereferencing it and then wrapping it up with drm_prime_pages_to_sg in the callback and map it with dma_map_sg (in the helper). Only the grabbing of the underlying page array is anything we need to be concerned about, and either those pages are pinned independently, or we're screwed no matter what. And indeed, nouveau/radeon pin the backing storage in their attach/detach functions. Since I've created this patch cma prime support for dma_buf was added. drm_gem_cma_prime_get_sg_table only calls kzalloc and the creates&maps the sg table with dma_get_sgtable. It doesn't touch any gem object state otherwise. So the cma helpers also look safe. The only thing we might claim it does is prevent concurrent mapping of dma_buf attachments. But a) that's not allowed and b) the current code is racy already since it checks whether the sg mapping exists _before_ grabbing the lock. So the dev->struct_mutex locking here does absolutely nothing useful, but only distracts. Remove it. This should also help Maarten's work to eventually pin the backing storage more dynamically by preventing locking inversions around dev->struct_mutex. v2: Add analysis for recently added cma helper prime code. Cc: Laurent Pinchart Cc: Maarten Lankhorst Acked-by: Laurent Pinchart Acked-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a35f206bdc34..f1159624c68e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -167,8 +167,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, if (WARN_ON(prime_attach->dir != DMA_NONE)) return ERR_PTR(-EBUSY); - mutex_lock(&obj->dev->struct_mutex); - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); if (!IS_ERR(sgt)) { @@ -182,7 +180,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, } } - mutex_unlock(&obj->dev->struct_mutex); return sgt; } -- cgit v1.2.3 From 7106bf96f81b0c207aaab4b46aa2acc5cab334d4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:33 +0200 Subject: drm/prime: add a bit of documentation about gem_obj->import_attach Lifetime rules seem to be solid around ->import_attach. So this patch just properly documents them. Note that pointing directly at the attachment might have issues for devices that have multiple struct device *dev parts constituting the logical gpu and so might need multiple attachment points. 
Similarly for drm devices which don't need a dma attachment at all (like udl). But fixing that up is material for different patches. Reviewed-by: Rob Clark Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- include/drm/drmP.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5ebed3ea10ba..5dc98947375c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -665,7 +665,16 @@ struct drm_gem_object { /* dma buf exported from this GEM object */ struct dma_buf *export_dma_buf; - /* dma buf attachment backing this object */ + /** + * import_attach - dma buf attachment backing this object + * + * Any foreign dma_buf imported as a gem object has this set to the + * attachment point for the device. This is invariant over the lifetime + * of a gem object. + * + * The driver's ->gem_free_object callback is responsible for cleaning + * up the dma_buf attachment and references acquired at import time. + */ struct dma_buf_attachment *import_attach; }; -- cgit v1.2.3 From 36da5908a275d6319c17e758b5bde89b4f573959 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:34 +0200 Subject: drm/gem: move drm_gem_object_handle_unreference_unlocked into drm_gem.c We have three callers of this function now and it's neither performance critical nor really small. So an inline function feels like overkill and unecessarily separates the different parts of the code. Since all callers of drm_gem_object_handle_free are now in drm_gem.c we can make that static (and remove the unused EXPORT_SYMBOL). To avoid a forward declaration move it (and drm_gem_object_free_bug) up a bit. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 89 ++++++++++++++++++++++++++++------------------- include/drm/drmP.h | 21 +---------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a4c8e8fba599..f3c1bbcf807f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -201,6 +201,60 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) } } +static void drm_gem_object_ref_bug(struct kref *list_kref) +{ + BUG(); +} + +/** + * Called after the last handle to the object has been closed + * + * Removes any name for the object. Note that this must be + * called before drm_gem_object_free or we'll be touching + * freed memory + */ +static void drm_gem_object_handle_free(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + /* Remove any name for this object */ + spin_lock(&dev->object_name_lock); + if (obj->name) { + idr_remove(&dev->object_name_idr, obj->name); + obj->name = 0; + spin_unlock(&dev->object_name_lock); + /* + * The object name held a reference to this object, drop + * that now. + * + * This cannot be the last reference, since the handle holds one too. 
+ */ + kref_put(&obj->refcount, drm_gem_object_ref_bug); + } else + spin_unlock(&dev->object_name_lock); + +} + +void +drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) +{ + if (obj == NULL) + return; + + if (atomic_read(&obj->handle_count) == 0) + return; + + /* + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before we + * checked for a name + */ + + if (atomic_dec_and_test(&obj->handle_count)) + drm_gem_object_handle_free(obj); + drm_gem_object_unreference_unlocked(obj); +} + /** * Removes the mapping from handle to filp for this object. */ @@ -656,41 +710,6 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); -static void drm_gem_object_ref_bug(struct kref *list_kref) -{ - BUG(); -} - -/** - * Called after the last handle to the object has been closed - * - * Removes any name for the object. Note that this must be - * called before drm_gem_object_free or we'll be touching - * freed memory - */ -void drm_gem_object_handle_free(struct drm_gem_object *obj) -{ - struct drm_device *dev = obj->dev; - - /* Remove any name for this object */ - spin_lock(&dev->object_name_lock); - if (obj->name) { - idr_remove(&dev->object_name_idr, obj->name); - obj->name = 0; - spin_unlock(&dev->object_name_lock); - /* - * The object name held a reference to this object, drop - * that now. - * - * This cannot be the last reference, since the handle holds one too. - */ - kref_put(&obj->refcount, drm_gem_object_ref_bug); - } else - spin_unlock(&dev->object_name_lock); - -} -EXPORT_SYMBOL(drm_gem_object_handle_free); - void drm_gem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5dc98947375c..0ef8e5481e15 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1555,7 +1555,6 @@ int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); -void drm_gem_object_handle_free(struct drm_gem_object *obj); void drm_gem_vm_open(struct vm_area_struct *vma); void drm_gem_vm_close(struct vm_area_struct *vma); int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, @@ -1602,25 +1601,7 @@ drm_gem_object_handle_reference(struct drm_gem_object *obj) atomic_inc(&obj->handle_count); } -static inline void -drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) -{ - if (obj == NULL) - return; - - if (atomic_read(&obj->handle_count) == 0) - return; - - /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ - - if (atomic_dec_and_test(&obj->handle_count)) - drm_gem_object_handle_free(obj); - drm_gem_object_unreference_unlocked(obj); -} +void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj); void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); -- cgit v1.2.3 From 6bc505b86ae9d4ab45464e3e3c0ab8992d6a5aff Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:35 +0200 Subject: drm/gem: remove bogus NULL check from drm_gem_object_handle_unreference_unlocked Calling this function with a NULL object is simply a bug, so papering over a NULL object not a good idea. 
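For illustration only, and not part of the patch: a minimal sketch of the caller contract this removal relies on. The foo_handle_delete() helper below is hypothetical; it stands in for any path that drops a handle reference, where the object pointer is non-NULL by construction, so a NULL really is a caller bug worth surfacing rather than hiding.

static int foo_handle_delete(struct drm_gem_object *obj)
{
	/* A NULL object means the caller is broken; don't paper over it. */
	if (WARN_ON(obj == NULL))
		return -EINVAL;

	drm_gem_object_handle_unreference_unlocked(obj);
	return 0;
}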
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f3c1bbcf807f..44978bcf8125 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -238,9 +238,6 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { - if (obj == NULL) - return; - if (atomic_read(&obj->handle_count) == 0) return; -- cgit v1.2.3 From 1216f732379151cd581444e385a8266d0b54549d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:36 +0200 Subject: drm/gem: WARN about unbalanced handle refcounts Trying to drop a reference we don't have is a pretty serious bug. Trying to paper over it is an even worse offense. So scream into dmesg with a big WARN in case that ever happens. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 44978bcf8125..dcbd2f559e39 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -238,7 +238,7 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { - if (atomic_read(&obj->handle_count) == 0) + if (WARN_ON(atomic_read(&obj->handle_count) == 0)) return; /* -- cgit v1.2.3 From 281856477cdaba70032af502ee7192fe7aa54f69 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:27 +0200 Subject: drm: rip out drm_core_has_MTRR checks The new arch_phys_wc_add/del functions do the right thing both with and without MTRR support in the kernel. So we can drop these additional checks. David Herrmann suggest to also kill the DRIVER_USE_MTRR flag since it's now unused, which spurred me to do a bit a better audit of the affected drivers. David helped a lot in that. Quoting our mail discussion: On Wed, Jul 10, 2013 at 5:41 PM, David Herrmann wrote: > On Wed, Jul 10, 2013 at 5:22 PM, Daniel Vetter wrote: >> On Wed, Jul 10, 2013 at 3:51 PM, David Herrmann wrote: >>>> -#if __OS_HAS_MTRR >>>> -static inline int drm_core_has_MTRR(struct drm_device *dev) >>>> -{ >>>> - return drm_core_check_feature(dev, DRIVER_USE_MTRR); >>>> -} >>>> -#else >>>> -#define drm_core_has_MTRR(dev) (0) >>>> -#endif >>>> - >>> >>> That was the last user of DRIVER_USE_MTRR (apart from drivers setting >>> it in .driver_features). Any reason to keep it around? >> >> Yeah, I guess we could rip things out. Which will also force me to >> properly audit drivers for the eventual behaviour change this could >> entail (in case there's an x86 driver which did not ask for an mtrr, >> but iirc there isn't). > > david@david-mb ~/dev/kernel/linux $ for i in drivers/gpu/drm/* ; do if > test -d "$i" ; then if ! grep -q USE_MTRR -r $i ; then echo $i ; fi ; > fi ; done > drivers/gpu/drm/exynos > drivers/gpu/drm/gma500 > drivers/gpu/drm/i2c > drivers/gpu/drm/nouveau > drivers/gpu/drm/omapdrm > drivers/gpu/drm/qxl > drivers/gpu/drm/rcar-du > drivers/gpu/drm/shmobile > drivers/gpu/drm/tilcdc > drivers/gpu/drm/ttm > drivers/gpu/drm/udl > drivers/gpu/drm/vmwgfx > david@david-mb ~/dev/kernel/linux $ > > So for x86 gma500,nouveau,qxl,udl,vmwgfx don't set DRIVER_USE_MTRR. > But I cannot tell whether they break if we call arch_phys_wc_add/del, > anyway. 
At least nouveau seemed to work here, but it doesn't use AGP > or drm_bufs, I guess. Cool, thanks a lot for stitching together the list of drivers to look at. So for real KMS drivers it's the drives responsibility to add an mtrr if it needs one. nouvea, radeon, mgag200, i915 and vmwgfx do that already. Somehow the savage driver also ends up doing that, I have no idea why. Note that gma500 as a pure KMS driver doesn't need MTRR setup since the platforms that it supports all support PAT. So no MTRRs needed to get wc iomappings. The mtrr support in the drm core is all for legacy mappings of garts, framebuffers and registers. All legacy drivers set the USE_MTRR flag, so we're good there. All in all I think we can really just ditch this /endquote v2: Also kill DRIVER_USE_MTRR as suggested by David Herrmann v3: Rebase on top of David Herrmann's agp setup/cleanup changes. Cc: David Herrmann Cc: Andy Lutomirski Signed-off-by: Daniel Vetter Acked-by: Andy Lutomirski Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_drv.c | 2 +- drivers/gpu/drm/cirrus/cirrus_drv.c | 2 +- drivers/gpu/drm/drm_bufs.c | 13 +++++-------- drivers/gpu/drm/drm_pci.c | 14 ++++++-------- drivers/gpu/drm/drm_vm.c | 3 +-- drivers/gpu/drm/i810/i810_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/mga/mga_drv.c | 2 +- drivers/gpu/drm/mgag200/mgag200_drv.c | 2 +- drivers/gpu/drm/r128/r128_drv.c | 2 +- drivers/gpu/drm/radeon/radeon_drv.c | 4 ++-- drivers/gpu/drm/savage/savage_drv.c | 2 +- drivers/gpu/drm/sis/sis_drv.c | 2 +- drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - drivers/gpu/drm/via/via_drv.c | 2 +- include/drm/drmP.h | 11 ----------- 16 files changed, 24 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 60f1ce3998c3..32e270dc714e 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -197,7 +197,7 @@ static const struct file_operations ast_fops = { }; static struct drm_driver driver = { - .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM, + .driver_features = DRIVER_MODESET | DRIVER_GEM, .dev_priv_size = 0, .load = ast_driver_load, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index dd9c908ab3fc..138364d91782 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -87,7 +87,7 @@ static const struct file_operations cirrus_driver_fops = { #endif }; static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR, + .driver_features = DRIVER_MODESET | DRIVER_GEM, .load = cirrus_driver_load, .unload = cirrus_driver_unload, .fops = &cirrus_driver_fops, diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 5f73f0af6125..f63133b0a9ab 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, return 0; } - if (drm_core_has_MTRR(dev)) { - if (map->type == _DRM_FRAME_BUFFER || - (map->flags & _DRM_WRITE_COMBINING)) { - map->mtrr = - arch_phys_wc_add(map->offset, map->size); - } + if (map->type == _DRM_FRAME_BUFFER || + (map->flags & _DRM_WRITE_COMBINING)) { + map->mtrr = + arch_phys_wc_add(map->offset, map->size); } if (map->type == _DRM_REGISTERS) { if (map->flags & _DRM_WRITE_COMBINING) @@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) iounmap(map->handle); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: - 
if (drm_core_has_MTRR(dev)) - arch_phys_wc_del(map->mtrr); + arch_phys_wc_del(map->mtrr); break; case _DRM_SHM: vfree(map->handle); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 0f54ad8a9ced..3fca2db1c40c 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -272,12 +272,11 @@ static int drm_pci_agp_init(struct drm_device *dev) DRM_ERROR("Cannot initialize the agpgart module.\n"); return -EINVAL; } - if (drm_core_has_MTRR(dev)) { - if (dev->agp) - dev->agp->agp_mtrr = arch_phys_wc_add( - dev->agp->agp_info.aper_base, - dev->agp->agp_info.aper_size * - 1024 * 1024); + if (dev->agp) { + dev->agp->agp_mtrr = arch_phys_wc_add( + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * + 1024 * 1024); } } return 0; @@ -286,8 +285,7 @@ static int drm_pci_agp_init(struct drm_device *dev) static void drm_pci_agp_destroy(struct drm_device *dev) { if (drm_core_has_AGP(dev) && dev->agp) { - if (drm_core_has_MTRR(dev)) - arch_phys_wc_del(dev->agp->agp_mtrr); + arch_phys_wc_del(dev->agp->agp_mtrr); drm_agp_clear(dev); drm_agp_destroy(dev->agp); dev->agp = NULL; diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index feb20035b2c4..b5c5af7328df 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) switch (map->type) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: - if (drm_core_has_MTRR(dev)) - arch_phys_wc_del(map->mtrr); + arch_phys_wc_del(map->mtrr); iounmap(map->handle); break; case _DRM_SHM: diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index d85c05b4877d..d8180d22cedd 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_HAVE_DMA, .dev_priv_size = sizeof(drm_i810_buf_priv_t), .load = i810_driver_load, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9411a745adaf..eec47bd00353 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1006,7 +1006,7 @@ static struct drm_driver driver = { * deal with them for Intel hardware. 
*/ .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, .load = i915_driver_load, .unload = i915_driver_unload, diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index fe71e1e44e48..6b1a87c8aac5 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -58,7 +58,7 @@ static const struct file_operations mga_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_mga_buf_priv_t), .load = mga_driver_load, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index b570127ae3b2..fcce7b2f8011 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -88,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = { }; static struct drm_driver driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR, + .driver_features = DRIVER_GEM | DRIVER_MODESET, .load = mgag200_driver_load, .unload = mgag200_driver_unload, .fops = &mgag200_driver_fops, diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index c2338cbc56ad..5bd307cd8da1 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -56,7 +56,7 @@ static const struct file_operations r128_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_r128_buf_priv_t), .load = r128_driver_load, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 3e52331124de..1f93dd503646 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -275,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = { static struct drm_driver driver_old = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_radeon_buf_priv_t), .load = radeon_driver_load, @@ -382,7 +382,7 @@ static const struct file_operations radeon_driver_kms_fops = { static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | + DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, .dev_priv_size = 0, diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 9135c8bd6fbc..3c030216e888 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -50,7 +50,7 @@ static const struct file_operations savage_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, + DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, .dev_priv_size = sizeof(drm_savage_buf_priv_t), .load = savage_driver_load, .firstopen = savage_driver_firstopen, diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index b88b2d302105..4383b74a3aa4 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -102,7 +102,7 
@@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file) } static struct drm_driver driver = { - .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, + .driver_features = DRIVER_USE_AGP, .load = sis_driver_load, .unload = sis_driver_unload, .open = sis_driver_open, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 951ec13e4e5c..3492ca5c46d3 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -55,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = { }; static struct drm_driver driver = { - .driver_features = DRIVER_USE_MTRR, .fops = &tdfx_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 448799968a06..92684a9b7e34 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -72,7 +72,7 @@ static const struct file_operations via_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | + DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .load = via_driver_load, .unload = via_driver_unload, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0ef8e5481e15..139d859adf16 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -74,7 +74,6 @@ #include #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) -#define __OS_HAS_MTRR (defined(CONFIG_MTRR)) struct module; @@ -139,7 +138,6 @@ int drm_err(const char *func, const char *format, ...); /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 #define DRIVER_REQUIRE_AGP 0x2 -#define DRIVER_USE_MTRR 0x4 #define DRIVER_PCI_DMA 0x8 #define DRIVER_SG 0x10 #define DRIVER_HAVE_DMA 0x20 @@ -1216,15 +1214,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev) return dev->driver->bus->get_irq(dev); } -#if __OS_HAS_MTRR -static inline int drm_core_has_MTRR(struct drm_device *dev) -{ - return drm_core_check_feature(dev, DRIVER_USE_MTRR); -} -#else -#define drm_core_has_MTRR(dev) (0) -#endif - static inline void drm_device_set_unplugged(struct drm_device *dev) { smp_wmb(); -- cgit v1.2.3 From 2ba5f7d538976a9d6a70339da4be49f6652fe753 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:28 +0200 Subject: drm/docs: rip out removed driver flags documentation I've forgotten this and shuffling all the little pieces into the respective patches is rather cumbersome ... Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index f97d08ba59be..6869b9b534b5 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -155,13 +155,6 @@ will become a fatal error. - - DRIVER_USE_MTRR - - Driver uses MTRR interface for mapping memory, the DRM core will - manage MTRR resources. Deprecated. - - DRIVER_PCI_DMA @@ -194,28 +187,6 @@ support shared IRQs (note that this is required of PCI drivers). - - DRIVER_IRQ_VBL - Unused. Deprecated. - - - DRIVER_DMA_QUEUE - - Should be set if the driver queues DMA requests and completes them - asynchronously. Deprecated. - - - - DRIVER_FB_DMA - - Driver supports DMA to/from the framebuffer, mapping of frambuffer - DMA buffers to userspace will be supported. Deprecated. - - - - DRIVER_IRQ_VBL2 - Unused. Deprecated. 
- DRIVER_GEM -- cgit v1.2.3 From 6eb9278adabd17da3bc1cb843c729d1b10d79c93 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:29 +0200 Subject: drm: remove the dma_ioctl special-case We might as well have a real ioctl function which checks for the callbacks. This seems to be a remnant from back in the days when each drm driver had their own complete ioctl table, with no shared core drm table at all. To make really sure no mis-guided user in a kms driver pops up again explicitly check for that in the new ioctl implementation. v2: Drop the unused variable I've accidentally left in the code, spotted by David Herrmann. Cc: David Herrmann Signed-off-by: Daniel Vetter Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 12 ++++++++++++ drivers/gpu/drm/drm_drv.c | 6 +----- include/drm/drmP.h | 2 ++ 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index f63133b0a9ab..471e051d295e 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1455,6 +1455,18 @@ int drm_mapbufs(struct drm_device *dev, void *data, return retcode; } +int drm_dma_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + if (dev->driver->dma_ioctl) + return dev->driver->dma_ioctl(dev, data, file_priv); + else + return -EINVAL; +} + struct drm_local_map *drm_getsarea(struct drm_device *dev) { struct drm_map_list *entry; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index d97976cc51cd..357a14ea3cb0 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), - /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ - DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -397,9 +396,6 @@ long drm_ioctl(struct file *filp, /* Do not trust userspace, use our own definition */ func = ioctl->func; - /* is there a local override? */ - if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) - func = dev->driver->dma_ioctl; if (!func) { DRM_DEBUG("no function\n"); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 139d859adf16..808eb237be0e 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1355,6 +1355,8 @@ extern int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_dma_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* DMA support (drm_dma.h) */ extern int drm_legacy_dma_setup(struct drm_device *dev); -- cgit v1.2.3 From 90254ac084a6465e46cdada933bf3a7e9ee90277 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:33 +0200 Subject: drm: fix locking in gem debugfs/procfs file The idr is protected with our spinlock, if we don't hold that nothing prevents the gem objects from disappearing from under us. 
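As an aside, a hedged sketch of the general rule the fix applies (show_one() is a made-up callback name, not from the patch): any walk of an idr whose insertions and removals happen under a spinlock must hold that same lock, and the callback then runs in atomic context, so it must not sleep or allocate with GFP_KERNEL.

	spin_lock(&dev->object_name_lock);
	idr_for_each(&dev->object_name_idr, show_one, m);	/* show_one() must not sleep here */
	spin_unlock(&dev->object_name_lock);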
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_info.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index d4b20ceda3fb..9f8fc4c328c9 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; seq_printf(m, " name size handles refcount\n"); + + spin_lock(&dev->object_name_lock); idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); + spin_unlock(&dev->object_name_lock); + return 0; } -- cgit v1.2.3 From 24f400326793341b005546d6ef82770f8d30d092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Thu, 8 Aug 2013 19:10:21 +0200 Subject: drm: fix minor number range calculation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, both ranges overlap. Fix the limits so both ranges are mutually exclusive. Also use the occasion to convert whitespaces to tabs. Signed-off-by: Kristian Høgsberg (fixed up tabs and adjust commit-msg accordingly) Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_stub.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index aa0664d91060..5edf938bbcc3 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -113,12 +113,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type) int base = 0, limit = 63; if (type == DRM_MINOR_CONTROL) { - base += 64; - limit = base + 127; - } else if (type == DRM_MINOR_RENDER) { - base += 128; - limit = base + 255; - } + base += 64; + limit = base + 63; + } else if (type == DRM_MINOR_RENDER) { + base += 128; + limit = base + 63; + } mutex_lock(&dev->struct_mutex); ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); -- cgit v1.2.3 From b5dc0d108cd3c0b50ddcb6f6c54be1bea4c39e01 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:13 +0200 Subject: drm/imx: kill firstopen callback This thing seems to do some kind of delayed setup. Really, real kms drivers shouldn't do that at all. Either stuff needs to be dynamically hotplugged or the driver setup sequence needs to be fixed. This patch here just moves the setup at the very end of the driver load callback, with the locking adjusted accordingly. v2: Also move the corresponding put from ->lastclose to ->unload. 
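A hedged sketch of the driver shape this moves towards (the foo_* names are hypothetical, not the imx code): one-time acquisition in ->load, the matching release in ->unload, with nothing left for ->firstopen and only fbcon restore remaining in ->lastclose.

static int foo_driver_load(struct drm_device *drm, unsigned long flags)
{
	if (!foo_device_get())		/* was deferred to ->firstopen */
		return -EINVAL;
	return 0;
}

static int foo_driver_unload(struct drm_device *drm)
{
	foo_device_put();		/* was done from ->lastclose */
	return 0;
}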
Cc: Sascha Hauer Cc: Greg Kroah-Hartman Signed-off-by: Daniel Vetter Acked-by: Greg Kroah-Hartman Signed-off-by: Dave Airlie --- drivers/staging/imx-drm/imx-drm-core.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 29607c25b261..e826086ec308 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -69,28 +69,20 @@ struct imx_drm_connector { struct module *owner; }; -static int imx_drm_driver_firstopen(struct drm_device *drm) -{ - if (!imx_drm_device_get()) - return -EINVAL; - - return 0; -} - static void imx_drm_driver_lastclose(struct drm_device *drm) { struct imx_drm_device *imxdrm = drm->dev_private; if (imxdrm->fbhelper) drm_fbdev_cma_restore_mode(imxdrm->fbhelper); - - imx_drm_device_put(); } static int imx_drm_driver_unload(struct drm_device *drm) { struct imx_drm_device *imxdrm = drm->dev_private; + imx_drm_device_put(); + drm_mode_config_cleanup(imxdrm->drm); drm_kms_helper_poll_fini(imxdrm->drm); @@ -225,8 +217,6 @@ struct drm_device *imx_drm_device_get(void) struct imx_drm_connector *con; struct imx_drm_crtc *crtc; - mutex_lock(&imxdrm->mutex); - list_for_each_entry(enc, &imxdrm->encoder_list, list) { if (!try_module_get(enc->owner)) { dev_err(imxdrm->dev, "could not get module %s\n", @@ -253,8 +243,6 @@ struct drm_device *imx_drm_device_get(void) imxdrm->references++; - mutex_unlock(&imxdrm->mutex); - return imxdrm->drm; unwind_crtc: @@ -446,6 +434,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) */ imxdrm->drm->vblank_disable_allowed = 1; + if (!imx_drm_device_get()) + ret = -EINVAL; + ret = 0; err_init: @@ -790,7 +781,6 @@ static struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, .load = imx_drm_driver_load, .unload = imx_drm_driver_unload, - .firstopen = imx_drm_driver_firstopen, .lastclose = imx_drm_driver_lastclose, .gem_free_object = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, -- cgit v1.2.3 From 0faa4a877765a4855dd570d6d391f77c5c37abc3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:14 +0200 Subject: drm/vmwgfx: remove ->firstopen callback So if we survey kms drivers there's a bunch of things they commonly do in ->lastclose - delayed processing of vga switcheroo requests (i915, nouveau, radeon) - force-restoring the fbcon (most) - resetting a bunch properties to make fbcon work better (omap) - disabling all outputs (vmwgfx) In short besides the semantically important vga switcheroo stuff they all try very hard to keep fbcon working in case X dies. But none of them try to not do this at driver unload time safe for vmwgfx, and digging through logs I couldn't find any reason for why vmwgfx is special. Since ->firstopen has lots of potential for abuse with kms drivers (like delaying driver setup to pamper over races in the load sequence) it's imo very much worth it to remove this logic so that we can stop using the ->firstopen callback for kms drivers. Also module unloading is rather a debug feature and developers should know how to restore the display to a sane configuration. 
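For comparison, a hedged sketch of the common KMS ->lastclose pattern surveyed above (hypothetical driver; the fbdev field and helper layout are assumptions): restore the fbcon configuration, process any delayed vga_switcheroo work, and nothing else.

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_device *foo = dev->dev_private;

	if (foo->fbdev)
		drm_fb_helper_restore_fbdev_mode(&foo->fbdev->helper);	/* put fbcon back */

	vga_switcheroo_process_delayed_switch();	/* i915/nouveau/radeon-style deferred switch */
}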
Cc: Jakob Bornecrantz Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 18 ------------------ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 - 2 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 2dd1919485e4..0dcfa6b76c45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -795,29 +795,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, return drm_ioctl(filp, cmd, arg); } -static int vmw_firstopen(struct drm_device *dev) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - dev_priv->is_opened = true; - - return 0; -} - static void vmw_lastclose(struct drm_device *dev) { - struct vmw_private *dev_priv = vmw_priv(dev); struct drm_crtc *crtc; struct drm_mode_set set; int ret; - /** - * Do nothing on the lastclose call from drm_unload. - */ - - if (!dev_priv->is_opened) - return; - - dev_priv->is_opened = false; set.x = 0; set.y = 0; set.fb = NULL; @@ -1131,7 +1114,6 @@ static struct drm_driver driver = { DRIVER_MODESET, .load = vmw_driver_load, .unload = vmw_driver_unload, - .firstopen = vmw_firstopen, .lastclose = vmw_lastclose, .irq_preinstall = vmw_irq_preinstall, .irq_postinstall = vmw_irq_postinstall, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 13aeda71280e..150ec64af617 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -324,7 +324,6 @@ struct vmw_private { */ bool stealth; - bool is_opened; bool enable_fb; /** -- cgit v1.2.3 From 7d14bb6b537414ffe6a8641cb14088465c28460d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:15 +0200 Subject: drm: don't call ->firstopen for KMS drivers It has way too much potential for driver writers to do stupid things like delayed hw setup because the load sequence is somehow racy (e.g. the imx driver in staging). So don't call it for modesetting drivers, which reduces the complexity of the drm core -> driver interface a notch. v2: Don't forget to update DocBook. v3: Go with Laurent's slightly more elaborate proposal for the DocBook update. Add a few words on top of his diff to elaborate a bit on what KMS drivers should and shouldn't do in lastclose. There was already a paragraph present talking about restoring properties, I've simply extended that one. Cc: Laurent Pinchart Acked-by: Laurent Pinchart Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 27 ++++++++++++++++----------- drivers/gpu/drm/drm_fops.c | 3 ++- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 6869b9b534b5..9fc8ed4ac0f4 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -2405,18 +2405,18 @@ void (*postclose) (struct drm_device *, struct drm_file *); The firstopen method is called by the DRM core - when an application opens a device that has no other opened file handle. - Similarly the lastclose method is called when - the last application holding a file handle opened on the device closes - it. Both methods are mostly used for UMS (User Mode Setting) drivers to - acquire and release device resources which should be done in the - load and unload - methods for KMS drivers. + for legacy UMS (User Mode Setting) drivers only when an application + opens a device that has no other opened file handle. 
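A minimal sketch of the resulting contract (kms_driver and kms_load are hypothetical; the core-side check is in the drm_fops.c hunk below): a driver that sets DRIVER_MODESET never sees ->firstopen called, so any one-time setup belongs in ->load.

static int kms_load(struct drm_device *dev, unsigned long flags)
{
	return 0;	/* acquire device resources here, not in ->firstopen */
}

static struct drm_driver kms_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.load = kms_load,
	/* no .firstopen: even if one were set, drm_setup() now skips it */
};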
UMS drivers can + implement it to acquire device resources. KMS drivers can't use the + method and must acquire resources in the load + method instead. - Note that the lastclose method is also called - at module unload time or, for hot-pluggable devices, when the device is - unplugged. The firstopen and + Similarly the lastclose method is called when + the last application holding a file handle opened on the device closes + it, for both UMS and KMS drivers. Additionally, the method is also + called at module unload time or, for hot-pluggable devices, when the + device is unplugged. The firstopen and lastclose calls can thus be unbalanced. @@ -2445,7 +2445,12 @@ void (*postclose) (struct drm_device *, struct drm_file *); The lastclose method should restore CRTC and plane properties to default value, so that a subsequent open of the - device will not inherit state from the previous user. + device will not inherit state from the previous user. It can also be + used to execute delayed power switching state changes, e.g. in + conjunction with the vga-switcheroo infrastructure. Beyond that KMS + drivers should not do any further cleanup. Only legacy UMS drivers might + need to clean up device state so that the vga console or an independent + fbdev driver could take over. diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 1817f03efe80..567997116d7e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -51,7 +51,8 @@ static int drm_setup(struct drm_device * dev) int i; int ret; - if (dev->driver->firstopen) { + if (dev->driver->firstopen && + !drm_core_check_feature(dev, DRIVER_MODESET)) { ret = dev->driver->firstopen(dev); if (ret != 0) return ret; -- cgit v1.2.3 From cb6458f97b53d7f73043206c18014b3ca63ac345 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:34 +0200 Subject: drm: remove procfs code, take 2 So almost two years ago I've tried to nuke the procfs code already once before: http://lists.freedesktop.org/archives/dri-devel/2011-October/015707.html The conclusion was that userspace drivers (specifically libdrm device node detection) stopped relying on procfs in 2001. But after some digging it turned out that the drmstat tool in libdrm is still using those files (but only when certain options are set). So we've decided to keep profcs. But I when I've started to dig around again what exactly this tool does I've noticed that it tries to read the "mem", "vm", and "vma" files from procfs. Now as far my git history digging shows "mem" never did anything useful (at least in the version that first showed up in upstream history in 2004) and the file was remove in commit 955b12def42e83287c1bdb1411d99451753c1391 Author: Ben Gamari Date: Tue Feb 17 20:08:49 2009 -0500 drm: Convert proc files to seq_file and introduce debugfs Which means that for over 4 years drmstat has been broken, and no one cared. In my opinion that's proof enough that no one is actually using drmstat, and so that we can savely nuke the procfs support from drm. While at it fix up the error case cleanup for debugfs in drm_get_minor. v2: Fix dates, libdrm stopped relying on procfs for drm node detection in 2001. v3: fixup compilation warning for !CONFIG_DEBUG_FS, reported by Fengguang Wu. 
Cc: kbuild test robot Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_drv.c | 8 -- drivers/gpu/drm/drm_proc.c | 209 --------------------------------------------- drivers/gpu/drm/drm_stub.c | 32 ++----- include/drm/drmP.h | 8 -- 5 files changed, 9 insertions(+), 250 deletions(-) delete mode 100644 drivers/gpu/drm/drm_proc.c diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 2aaf082368a2..7b2343a2f5eb 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -7,7 +7,7 @@ ccflags-y := -Iinclude/drm drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ - drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ + drm_lock.o drm_memory.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 357a14ea3cb0..cf40be42baf6 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -238,13 +238,6 @@ static int __init drm_core_init(void) goto err_p2; } - drm_proc_root = proc_mkdir("dri", NULL); - if (!drm_proc_root) { - DRM_ERROR("Cannot create /proc/dri\n"); - ret = -1; - goto err_p3; - } - drm_debugfs_root = debugfs_create_dir("dri", NULL); if (!drm_debugfs_root) { DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); @@ -267,7 +260,6 @@ err_p1: static void __exit drm_core_exit(void) { - remove_proc_entry("dri", NULL); debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c deleted file mode 100644 index d7f2324b4fb1..000000000000 --- a/drivers/gpu/drm/drm_proc.c +++ /dev/null @@ -1,209 +0,0 @@ -/** - * \file drm_proc.c - * /proc support for DRM - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - * - * \par Acknowledgements: - * Matthew J Sottek sent in a patch to fix - * the problem with the proc files not outputting all their information. - */ - -/* - * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include -#include - -/*************************************************** - * Initialization, etc. - **************************************************/ - -/** - * Proc file list. - */ -static const struct drm_info_list drm_proc_list[] = { - {"name", drm_name_info, 0}, - {"vm", drm_vm_info, 0}, - {"clients", drm_clients_info, 0}, - {"bufs", drm_bufs_info, 0}, - {"gem_names", drm_gem_name_info, DRIVER_GEM}, -#if DRM_DEBUG_CODE - {"vma", drm_vma_info, 0}, -#endif -}; -#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) - -static int drm_proc_open(struct inode *inode, struct file *file) -{ - struct drm_info_node* node = PDE_DATA(inode); - - return single_open(file, node->info_ent->show, node); -} - -static const struct file_operations drm_proc_fops = { - .owner = THIS_MODULE, - .open = drm_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - - -/** - * Initialize a given set of proc files for a device - * - * \param files The array of files to create - * \param count The number of files given - * \param root DRI proc dir entry. - * \param minor device minor number - * \return Zero on success, non-zero on failure - * - * Create a given set of proc files represented by an array of - * gdm_proc_lists in the given root directory. - */ -static int drm_proc_create_files(const struct drm_info_list *files, int count, - struct proc_dir_entry *root, struct drm_minor *minor) -{ - struct drm_device *dev = minor->dev; - struct proc_dir_entry *ent; - struct drm_info_node *tmp; - int i; - - for (i = 0; i < count; i++) { - u32 features = files[i].driver_features; - - if (features != 0 && - (dev->driver->driver_features & features) != features) - continue; - - tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); - if (!tmp) - return -1; - - tmp->minor = minor; - tmp->info_ent = &files[i]; - list_add(&tmp->list, &minor->proc_nodes.list); - - ent = proc_create_data(files[i].name, S_IRUGO, root, - &drm_proc_fops, tmp); - if (!ent) { - DRM_ERROR("Cannot create /proc/dri/%u/%s\n", - minor->index, files[i].name); - list_del(&tmp->list); - kfree(tmp); - return -1; - } - } - return 0; -} - -/** - * Initialize the DRI proc filesystem for a device - * - * \param dev DRM device - * \param root DRI proc dir entry. - * \param dev_root resulting DRI device proc dir entry. - * \return root entry pointer on success, or NULL on failure. - * - * Create the DRI proc root entry "/proc/dri", the device proc root entry - * "/proc/dri/%minor%/", and each entry in proc_list as - * "/proc/dri/%minor%/%name%". 
- */ -int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root) -{ - char name[12]; - int ret; - - INIT_LIST_HEAD(&minor->proc_nodes.list); - sprintf(name, "%u", minor->index); - minor->proc_root = proc_mkdir(name, root); - if (!minor->proc_root) { - DRM_ERROR("Cannot create /proc/dri/%s\n", name); - return -1; - } - - ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, - minor->proc_root, minor); - if (ret) { - remove_proc_subtree(name, root); - minor->proc_root = NULL; - DRM_ERROR("Failed to create core drm proc files\n"); - return ret; - } - - return 0; -} - -static int drm_proc_remove_files(const struct drm_info_list *files, int count, - struct drm_minor *minor) -{ - struct list_head *pos, *q; - struct drm_info_node *tmp; - int i; - - for (i = 0; i < count; i++) { - list_for_each_safe(pos, q, &minor->proc_nodes.list) { - tmp = list_entry(pos, struct drm_info_node, list); - if (tmp->info_ent == &files[i]) { - remove_proc_entry(files[i].name, - minor->proc_root); - list_del(pos); - kfree(tmp); - } - } - } - return 0; -} - -/** - * Cleanup the proc filesystem resources. - * - * \param minor device minor number. - * \param root DRI proc dir entry. - * \param dev_root DRI device proc dir entry. - * \return always zero. - * - * Remove all proc entries created by proc_init(). - */ -int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) -{ - char name[64]; - - if (!root || !minor->proc_root) - return 0; - - drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); - - sprintf(name, "%d", minor->index); - remove_proc_subtree(name, root); - return 0; -} - diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 5edf938bbcc3..e30bb0d7c67a 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -68,7 +68,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); struct idr drm_minors_idr; struct class *drm_class; -struct proc_dir_entry *drm_proc_root; struct dentry *drm_debugfs_root; int drm_err(const char *func, const char *format, ...) @@ -315,9 +314,8 @@ EXPORT_SYMBOL(drm_fill_in_dev); * \param sec-minor structure to hold the assigned minor * \return negative number on failure. * - * Search an empty entry and initialize it to the given parameters, and - * create the proc init entry via proc_init(). This routines assigns - * minor numbers to secondary heads of multi-headed cards + * Search an empty entry and initialize it to the given parameters. 
This + * routines assigns minor numbers to secondary heads of multi-headed cards */ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) { @@ -345,20 +343,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) idr_replace(&drm_minors_idr, new_minor, minor_id); - if (type == DRM_MINOR_LEGACY) { - ret = drm_proc_init(new_minor, drm_proc_root); - if (ret) { - DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); - goto err_mem; - } - } else - new_minor->proc_root = NULL; - #if defined(CONFIG_DEBUG_FS) ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); - goto err_g2; + goto err_mem; } #endif @@ -366,7 +355,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) if (ret) { printk(KERN_ERR "DRM: Error sysfs_device_add.\n"); - goto err_g2; + goto err_debugfs; } *minor = new_minor; @@ -374,10 +363,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) return 0; -err_g2: - if (new_minor->type == DRM_MINOR_LEGACY) - drm_proc_cleanup(new_minor, drm_proc_root); +err_debugfs: +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_cleanup(new_minor); err_mem: +#endif kfree(new_minor); err_idr: idr_remove(&drm_minors_idr, minor_id); @@ -391,10 +381,6 @@ EXPORT_SYMBOL(drm_get_minor); * * \param sec_minor - structure to be released * \return always zero - * - * Cleans up the proc resources. Not legal for this to be the - * last minor released. - * */ int drm_put_minor(struct drm_minor **minor_p) { @@ -402,8 +388,6 @@ int drm_put_minor(struct drm_minor **minor_p) DRM_DEBUG("release secondary minor %d\n", minor->index); - if (minor->type == DRM_MINOR_LEGACY) - drm_proc_cleanup(minor, drm_proc_root); #if defined(CONFIG_DEBUG_FS) drm_debugfs_cleanup(minor); #endif diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 808eb237be0e..39911dca359b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include @@ -1027,8 +1026,6 @@ struct drm_minor { struct device kdev; /**< Linux device */ struct drm_device *dev; - struct proc_dir_entry *proc_root; /**< proc directory entry */ - struct drm_info_node proc_nodes; struct dentry *debugfs_root; struct list_head debugfs_list; @@ -1438,17 +1435,12 @@ extern unsigned int drm_timestamp_precision; extern unsigned int drm_timestamp_monotonic; extern struct class *drm_class; -extern struct proc_dir_entry *drm_proc_root; extern struct dentry *drm_debugfs_root; extern struct idr drm_minors_idr; extern struct drm_local_map *drm_getsarea(struct drm_device *dev); - /* Proc support (drm_proc.h) */ -extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root); -extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); - /* Debugfs support */ #if defined(CONFIG_DEBUG_FS) extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, -- cgit v1.2.3 From f336ab76008f66f6153573d1479aeed388d7b08a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 15:41:35 +0200 Subject: drm: move dev data clearing from drm_setup to lastclose We kzalloc this structure, and for real kms devices we should never loose track of things really. But ums/legacy drivers rely on the drm core to clean up a bit of cruft between lastclose and firstopen (i.e. when X is being restarted), so keep this around. But give it a clear drm_legacy_ prefix and conditionalize the code on !DRIVER_MODESET. 
Cc: David Herrmann Signed-off-by: Daniel Vetter Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 27 +++++++++++++++++++++++++++ drivers/gpu/drm/drm_fops.c | 27 +++------------------------ 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index cf40be42baf6..288da3dc2a09 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -170,6 +170,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = { #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) +/** + * drm_legacy_dev_reinit + * + * Reinitializes a legacy/ums drm device in it's lastclose function. + */ +static void drm_legacy_dev_reinit(struct drm_device *dev) +{ + int i; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + + for (i = 0; i < ARRAY_SIZE(dev->counts); i++) + atomic_set(&dev->counts[i], 0); + + dev->sigdata.lock = NULL; + + dev->context_flag = 0; + dev->last_context = 0; + dev->if_version = 0; +} + /** * Take down the DRM device. * @@ -209,6 +234,8 @@ int drm_lastclose(struct drm_device * dev) dev->dev_mapping = NULL; mutex_unlock(&dev->struct_mutex); + drm_legacy_dev_reinit(dev); + DRM_DEBUG("lastclose completed\n"); return 0; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 567997116d7e..59f459291093 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -48,7 +48,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp, static int drm_setup(struct drm_device * dev) { - int i; int ret; if (dev->driver->firstopen && @@ -58,32 +57,12 @@ static int drm_setup(struct drm_device * dev) return ret; } - atomic_set(&dev->ioctl_count, 0); - atomic_set(&dev->vma_count, 0); - - i = drm_legacy_dma_setup(dev); - if (i < 0) - return i; - - for (i = 0; i < ARRAY_SIZE(dev->counts); i++) - atomic_set(&dev->counts[i], 0); - - dev->sigdata.lock = NULL; + ret = drm_legacy_dma_setup(dev); + if (ret < 0) + return ret; - dev->context_flag = 0; - dev->last_context = 0; - dev->if_version = 0; DRM_DEBUG("\n"); - - /* - * The kernel's context could be created here, but is now created - * in drm_dma_enqueue. This is more resource-efficient for - * hardware that does not do DMA, but may mean that - * drm_select_queue fails between the time the interrupt is - * initialized and the time the queues are initialized. - */ - return 0; } -- cgit v1.2.3 From f51607ac8d4d09b59faf7c6d718f413f537a7b34 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:03 +0100 Subject: drm: Remove stale prototypes A few prototypes have been left in the headers, their function friends long gone. 
Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- include/drm/drmP.h | 5 ----- include/drm/drm_crtc.h | 11 ----------- 2 files changed, 16 deletions(-) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 39911dca359b..046a7db43a6c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1371,7 +1371,6 @@ extern int drm_irq_uninstall(struct drm_device *dev); extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); -extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime); @@ -1491,10 +1490,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); -int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj); -int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf, - struct drm_gem_object **obj); - #if DRM_DEBUG_CODE extern int drm_vma_info(struct seq_file *m, void *data); #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 45f133228553..eb40a967fc65 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -948,9 +948,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj, extern int drm_object_property_get_value(struct drm_mode_object *obj, struct drm_property *property, uint64_t *value); -extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev); -extern void drm_framebuffer_set_object(struct drm_device *dev, - unsigned long handle); extern int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs); @@ -961,10 +958,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb); extern void drm_framebuffer_remove(struct drm_framebuffer *fb); extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); -extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); -extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); -extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); -extern bool drm_crtc_in_use(struct drm_crtc *crtc); extern void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, @@ -1039,10 +1032,6 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mode_hotplug_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); -extern int drm_mode_replacefb(struct drm_device *dev, - void *data, struct drm_file *file_priv); extern int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, -- cgit v1.2.3 From ddecb10cf402a8325579f298fd4986a90f33496b Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:04 +0100 Subject: drm: Remove drm_mode_create_dithering_property() This was 
last used by nouveau, replaced by a driver-specific property in: commit de69185573586302ada2e59ba41835df36986277 Author: Ben Skeggs Date: Mon Oct 17 12:23:41 2011 +1000 drm/nouveau: improve dithering properties, and implement proper auto mode Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 31 ------------------------------- include/drm/drm_crtc.h | 1 - 2 files changed, 32 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a6917645fb4a..ffb791f95d45 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; -static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] = -{ - { DRM_MODE_DITHERING_OFF, "Off" }, - { DRM_MODE_DITHERING_ON, "On" }, - { DRM_MODE_DITHERING_AUTO, "Automatic" }, -}; - /* * Non-global properties, but "required" for certain connectors. */ @@ -1160,30 +1153,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev) } EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); -/** - * drm_mode_create_dithering_property - create dithering property - * @dev: DRM device - * - * Called by a driver the first time it's needed, must be attached to desired - * connectors. - */ -int drm_mode_create_dithering_property(struct drm_device *dev) -{ - struct drm_property *dithering_mode; - - if (dev->mode_config.dithering_mode_property) - return 0; - - dithering_mode = - drm_property_create_enum(dev, 0, "dithering", - drm_dithering_mode_enum_list, - ARRAY_SIZE(drm_dithering_mode_enum_list)); - dev->mode_config.dithering_mode_property = dithering_mode; - - return 0; -} -EXPORT_SYMBOL(drm_mode_create_dithering_property); - /** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index eb40a967fc65..781988f637ec 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -982,7 +982,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, char *formats[]); extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); -extern int drm_mode_create_dithering_property(struct drm_device *dev); extern int drm_mode_create_dirty_info_property(struct drm_device *dev); extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); -- cgit v1.2.3 From 67587e8689ffbb788468c738f07a9678bde51084 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:05 +0100 Subject: drm: Remove drm_mode_list_concat() The last user was removed in commit 575dc34ee0de867ba83abf25998e0963bff451fa Author: Dave Airlie Date: Mon Sep 7 18:43:26 2009 +1000 drm/kms: remove old std mode fallback code. The new code adds modes in the helper, which makes more sense I disliked the non-driver code adding modes. 
Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 21 --------------------- include/drm/drm_crtc.h | 2 -- 2 files changed, 23 deletions(-) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 504a602f495c..fc2adb62b757 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -595,27 +595,6 @@ void drm_mode_set_name(struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_mode_set_name); -/** - * drm_mode_list_concat - move modes from one list to another - * @head: source list - * @new: dst list - * - * LOCKING: - * Caller must ensure both lists are locked. - * - * Move all the modes from @head to @new. - */ -void drm_mode_list_concat(struct list_head *head, struct list_head *new) -{ - - struct list_head *entry, *tmp; - - list_for_each_safe(entry, tmp, head) { - list_move_tail(entry, new); - } -} -EXPORT_SYMBOL(drm_mode_list_concat); - /** * drm_mode_width - get the width of a mode * @mode: mode diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 781988f637ec..39d84dc7abcf 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -927,8 +927,6 @@ extern int drm_mode_height(const struct drm_display_mode *mode); /* for us by fb module */ extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); -extern void drm_mode_list_concat(struct list_head *head, - struct list_head *new); extern void drm_mode_validate_size(struct drm_device *dev, struct list_head *mode_list, int maxX, int maxY, int maxPitch); -- cgit v1.2.3 From 86f422d5be001cfe311fc46d60a6e3ef6868dd40 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:06 +0100 Subject: drm: Make drm_mode_remove() static It's only used in drm_crtc.c. Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 7 +++---- include/drm/drm_crtc.h | 1 - 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ffb791f95d45..54b4169fc48e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -686,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector, } EXPORT_SYMBOL(drm_mode_probed_add); -/** +/* * drm_mode_remove - remove and free a mode * @connector: connector list to modify * @mode: mode to remove * * Remove @mode from @connector's mode list, then free it. 
*/ -void drm_mode_remove(struct drm_connector *connector, - struct drm_display_mode *mode) +static void drm_mode_remove(struct drm_connector *connector, + struct drm_display_mode *mode) { list_del(&mode->head); drm_mode_destroy(connector->dev, mode); } -EXPORT_SYMBOL(drm_mode_remove); /** * drm_connector_init - Init a preallocated connector diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 39d84dc7abcf..014e491c3c1c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -910,7 +910,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); -extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); -- cgit v1.2.3 From a03eb8388d91eb1c8dae79b790e6560134977a15 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:07 +0100 Subject: drm: Remove 2 unused defines These were introduced in the very first DRM commit: commit f453ba0460742ad027ae0c4c7d61e62817b3e7ef Author: Dave Airlie Date: Fri Nov 7 14:05:41 2008 -0800 DRM: add mode setting support Add mode setting support to the DRM layer. But are unused. Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- include/drm/drm_crtc.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 014e491c3c1c..0a9f73e8be26 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -494,8 +494,6 @@ struct drm_encoder_funcs { void (*destroy)(struct drm_encoder *encoder); }; -#define DRM_CONNECTOR_MAX_UMODES 16 -#define DRM_CONNECTOR_LEN 32 #define DRM_CONNECTOR_MAX_ENCODER 3 /** -- cgit v1.2.3 From 2c9c52e85318c67cfc70f4885ad543abf0f845e6 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:08 +0100 Subject: drm: Make drm_fb_cma_describe() static This function is only used in drm_fb_cma_helper.c. 
Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_cma_helper.c | 5 ++--- include/drm/drm_fb_cma_helper.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index c385cc5e730e..61b5a47ad239 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); #ifdef CONFIG_DEBUG_FS -/** +/* * drm_fb_cma_describe() - Helper to dump information about a single * CMA framebuffer object */ -void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) +static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); int i, n = drm_format_num_planes(fb->pixel_format); @@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) drm_gem_cma_describe(fb_cma->obj[i], m); } } -EXPORT_SYMBOL_GPL(drm_fb_cma_describe); /** * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 4a3fc244301c..c54cf3d4a03f 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -24,7 +24,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); #ifdef CONFIG_DEBUG_FS -void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m); int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); #endif -- cgit v1.2.3 From 15f3b9d95bf2c85afcf11e7536c17eaeedfcefe7 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:09 +0100 Subject: drm: Remove unused PCI ids Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- include/drm/drm_pciids.h | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 34efaf64cc87..0a85e5c5d61b 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -690,29 +690,6 @@ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ {0, 0, 0} -#define mach64_PCI_IDS \ - {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0, 0, 0} - #define sisdrv_PCI_IDS \ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0x1039, 0x5300, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ @@ -752,10 +729,6 @@ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0, 0, 0} -#define gamma_PCI_IDS \ - {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0, 0, 0} - #define savage_PCI_IDS \ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ @@ -781,6 +754,3 @@ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ {0, 0, 0} - -#define ffb_PCI_IDS \ - {0, 0, 0} -- cgit v1.2.3 From 66cc8b6b8b2b3b5c9a67429af04ec356ff7fcfa4 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Tue, 20 Aug 2013 00:53:10 +0100 Subject: drm: Make drm_get_platform_dev() static It's only used in drm_platform.c. Signed-off-by: Damien Lespiau Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_platform.c | 7 +++---- include/drm/drmP.h | 3 --- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index b8a282ea8751..400024b6d512 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -28,7 +28,7 @@ #include #include -/** +/* * Register. * * \param platdev - Platform device struture @@ -39,8 +39,8 @@ * Try and register, if we fail to register, backout previous work. */ -int drm_get_platform_dev(struct platform_device *platdev, - struct drm_driver *driver) +static int drm_get_platform_dev(struct platform_device *platdev, + struct drm_driver *driver) { struct drm_device *dev; int ret; @@ -107,7 +107,6 @@ err_g1: mutex_unlock(&drm_global_mutex); return ret; } -EXPORT_SYMBOL(drm_get_platform_dev); static int drm_platform_get_irq(struct drm_device *dev) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 046a7db43a6c..1a7a78fdb4b7 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1657,9 +1657,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device); -extern int drm_get_platform_dev(struct platform_device *pdev, - struct drm_driver *driver); - /* returns true if currently okay to sleep */ static __inline__ bool drm_can_sleep(void) { -- cgit v1.2.3 From a8e11d1c435f9d185c9f3b1981b9613a579b9999 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:37 +0200 Subject: drm/gem: fix up flink name create race This is the 2nd attempt, I've always been a bit dissatisified with the tricky nature of the first one: http://lists.freedesktop.org/archives/dri-devel/2012-July/025451.html The issue is that the flink ioctl can race with calling gem_close on the last gem handle. In that case we'll end up with a zero handle count, but an flink name (and it's corresponding reference). Which results in a neat space leak. In my first attempt I've solved this by rechecking the handle count. But fundamentally the issue is that ->handle_count isn't your usual refcount - it can be resurrected from 0 among other things. For those special beasts atomic_t often suggest way more ordering that it actually guarantees. To prevent being tricked by those hairy semantics take the easy way out and simply protect the handle with the existing dev->object_name_lock. With that change implemented it's dead easy to fix the flink vs. 
gem close race: When we try to create the name we simply have to check whether there's still officially a gem handle around and if not refuse to create the flink name. Since the handle count decrement and flink name destruction are now also protected by that lock, the race is gone and we can't ever leak the flink reference again. Outside of the drm core only the exynos driver looks at the handle count, and tbh I have no idea why (it's just for debug dmesg output luckily). I've considered inlining the drm_gem_object_handle_free, but I plan to add more name-like things (like the exported dma_buf) to this scheme, so it's clearer to leave the handle freeing in its own function. This is exercised by the new gem_flink_race i-g-t testcase, which on my snb leaks gem objects at a rate of roughly 1k objects/s. v2: Fix up the error path handling in handle_create and make it more robust by simply calling object_handle_unreference. v3: Fix up the handle_unreference logic bug - atomic_dec_and_test returns 1 for 0. Oops. v4: Squash in inlining of drm_gem_object_handle_reference as suggested by Dave Airlie and add a note that we now have a testcase. Cc: Dave Airlie Cc: Inki Dae Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 31 ++++++++++++++++++++----------- drivers/gpu/drm/drm_info.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_gem.c | 2 +- include/drm/drmP.h | 19 ++++++++++--------- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index dcbd2f559e39..b8a8132becef 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -154,7 +154,7 @@ void drm_gem_private_object_init(struct drm_device *dev, obj->filp = NULL; kref_init(&obj->refcount); - atomic_set(&obj->handle_count, 0); + obj->handle_count = 0; obj->size = size; } EXPORT_SYMBOL(drm_gem_private_object_init); @@ -218,11 +218,9 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; /* Remove any name for this object */ - spin_lock(&dev->object_name_lock); if (obj->name) { idr_remove(&dev->object_name_idr, obj->name); obj->name = 0; - spin_unlock(&dev->object_name_lock); /* * The object name held a reference to this object, drop * that now. @@ -230,15 +228,13 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) * This cannot be the last reference, since the handle holds one too. */ kref_put(&obj->refcount, drm_gem_object_ref_bug); - } else - spin_unlock(&dev->object_name_lock); - + } } void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { - if (WARN_ON(atomic_read(&obj->handle_count) == 0)) + if (WARN_ON(obj->handle_count == 0)) return; /* @@ -247,8 +243,11 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) * checked for a name */ - if (atomic_dec_and_test(&obj->handle_count)) + spin_lock(&obj->dev->object_name_lock); + if (--obj->handle_count == 0) drm_gem_object_handle_free(obj); + spin_unlock(&obj->dev->object_name_lock); + drm_gem_object_unreference_unlocked(obj); } @@ -326,17 +325,21 @@ drm_gem_handle_create(struct drm_file *file_priv, * allocation under our spinlock.
*/ idr_preload(GFP_KERNEL); + spin_lock(&dev->object_name_lock); spin_lock(&file_priv->table_lock); ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); - + drm_gem_object_reference(obj); + obj->handle_count++; spin_unlock(&file_priv->table_lock); + spin_unlock(&dev->object_name_lock); idr_preload_end(); - if (ret < 0) + if (ret < 0) { + drm_gem_object_handle_unreference_unlocked(obj); return ret; + } *handlep = ret; - drm_gem_object_handle_reference(obj); if (dev->driver->gem_open_object) { ret = dev->driver->gem_open_object(obj, file_priv); @@ -577,6 +580,12 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, idr_preload(GFP_KERNEL); spin_lock(&dev->object_name_lock); + /* prevent races with concurrent gem_close. */ + if (obj->handle_count == 0) { + ret = -ENOENT; + goto err; + } + if (!obj->name) { ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); if (ret < 0) diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 9f8fc4c328c9..5351e811c421 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data) seq_printf(m, "%6d %8zd %7d %8d\n", obj->name, obj->size, - atomic_read(&obj->handle_count), + obj->handle_count, atomic_read(&obj->refcount.refcount)); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index b904633863e8..f3c6f40666e1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -136,7 +136,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) obj = &exynos_gem_obj->base; buf = exynos_gem_obj->buffer; - DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); + DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); /* * do not release memory region from exporter. diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1a7a78fdb4b7..57dce6081d73 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -615,8 +615,16 @@ struct drm_gem_object { /** Reference count of this object */ struct kref refcount; - /** Handle count of this object. Each handle also holds a reference */ - atomic_t handle_count; /* number of handles on this object */ + /** + * handle_count - gem file_priv handle count of this object + * + * Each handle also holds a reference. Note that when the handle_count + * drops to 0 any global names (e.g. the id in the flink namespace) will + * be cleared. + * + * Protected by dev->object_name_lock. + * */ + unsigned handle_count; /** Related drm device */ struct drm_device *dev; @@ -1572,13 +1580,6 @@ int drm_gem_handle_create(struct drm_file *file_priv, u32 *handlep); int drm_gem_handle_delete(struct drm_file *filp, u32 handle); -static inline void -drm_gem_object_handle_reference(struct drm_gem_object *obj) -{ - drm_gem_object_reference(obj); - atomic_inc(&obj->handle_count); -} - void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj); void drm_gem_free_mmap_offset(struct drm_gem_object *obj); -- cgit v1.2.3 From 730c4ff95eb54e5bab39357baddd0aa6da10d4fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:38 +0200 Subject: drm/prime: fix error path in drm_gem_prime_fd_to_handle handle_unreference only clears up the obj->name and the reference, but would leave a dangling handle in the idr. The right thing to do is to call handle_delete. 
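As an illustration of why handle_delete is the right teardown here, a simplified sketch of its semantics (not the in-tree implementation, and ignoring the driver's gem_close_object hook): it drops the per-file idr entry in addition to the handle reference, so no dangling handle survives the error path.

/* Simplified sketch only: what "deleting a handle" has to cover. */
static int example_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);
	obj = idr_find(&filp->object_idr, handle);
	if (obj)
		idr_remove(&filp->object_idr, handle);	/* no dangling idr entry */
	spin_unlock(&filp->table_lock);

	if (!obj)
		return -EINVAL;

	/* drop the reference the handle held (which may also drop the name) */
	drm_gem_object_handle_unreference_unlocked(obj);
	return 0;
}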
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index f1159624c68e..82cd83e62e7d 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -476,7 +476,7 @@ fail: /* hmm, if driver attached, we are relying on the free-object path * to detach.. which seems ok.. */ - drm_gem_object_handle_unreference_unlocked(obj); + drm_gem_handle_delete(file_priv, *handle); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); -- cgit v1.2.3 From becee2a57fd2b64c53ebef58277fbca895cf8ec1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:39 +0200 Subject: drm/gem: make drm_gem_object_handle_unreference_unlocked static No one outside of drm should use this, the official interfaces are drm_gem_handle_create and drm_gem_handle_delete. The handle refcounting is purely an implementation detail of gem. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 2 +- include/drm/drmP.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index b8a8132becef..52548fd34b15 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -231,7 +231,7 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) } } -void +static void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { if (WARN_ON(obj->handle_count == 0)) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 57dce6081d73..7782dbbbe126 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1580,7 +1580,6 @@ int drm_gem_handle_create(struct drm_file *file_priv, u32 *handlep); int drm_gem_handle_delete(struct drm_file *filp, u32 handle); -void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj); void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); -- cgit v1.2.3 From 4332bf438bbbc31319abed61d2ac6d9932ff980c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:41 +0200 Subject: drm/prime: use proper pointer in drm_gem_prime_handle_to_fd Part of the function uses the properly-typed dmabuf variable, the other an untyped void *buf. Kill the later. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 82cd83e62e7d..c2d6d54e10e0 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -303,7 +303,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, int *prime_fd) { struct drm_gem_object *obj; - void *buf; int ret = 0; struct dma_buf *dmabuf; @@ -323,15 +322,15 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, goto out_have_obj; } - buf = dev->driver->gem_prime_export(dev, obj, flags); - if (IS_ERR(buf)) { + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ - ret = PTR_ERR(buf); + ret = PTR_ERR(dmabuf); goto out; } - obj->export_dma_buf = buf; + obj->export_dma_buf = dmabuf; /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back @@ -341,7 +340,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, if (ret) goto fail_put_dmabuf; - ret = dma_buf_fd(buf, flags); + ret = dma_buf_fd(dmabuf, flags); if (ret < 0) goto fail_rm_handle; @@ -362,11 +361,12 @@ out_have_obj: goto out; fail_rm_handle: - drm_prime_remove_buf_handle_locked(&file_priv->prime, buf); + drm_prime_remove_buf_handle_locked(&file_priv->prime, + dmabuf); fail_put_dmabuf: /* clear NOT to be checked when releasing dma_buf */ obj->export_dma_buf = NULL; - dma_buf_put(buf); + dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); mutex_unlock(&file_priv->prime.lock); -- cgit v1.2.3 From bdf655de47b0d17ee2efc3bea5f617445ff77adc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:42 +0200 Subject: drm/prime: shrink critical section protected by prime lock When exporting a gem object as a dma-buf the critical section for the per-fd prime lock is just the adding (and in case of errors, removing) of the handle to the per-fd lookup cache. So restrict the critical section to just that part of the function. This simplifies later reordering. 
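Sketched out, the locking shape this change aims for is roughly the following; export_object() and cache_insert() are placeholder names standing in for the driver export hook and the per-fd handle-cache helper, not real functions:

struct dma_buf *export_object(struct drm_device *dev, struct drm_gem_object *obj, uint32_t flags);	/* placeholder */
int cache_insert(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dmabuf, uint32_t handle);	/* placeholder */

static int example_handle_to_fd(struct drm_device *dev,
				struct drm_file *file_priv,
				struct drm_gem_object *obj,
				uint32_t flags, uint32_t handle)
{
	struct dma_buf *dmabuf;
	int ret;

	/* the potentially expensive export runs without prime.lock held */
	dmabuf = export_object(dev, obj, flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* only the per-fd lookup-cache insert (and its error unwind, omitted
	 * here) needs the per-fd prime lock */
	mutex_lock(&file_priv->prime.lock);
	ret = cache_insert(&file_priv->prime, dmabuf, handle);
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}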
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index c2d6d54e10e0..cb0451679e81 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -310,7 +310,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, if (!obj) return -ENOENT; - mutex_lock(&file_priv->prime.lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; @@ -332,6 +331,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, } obj->export_dma_buf = dmabuf; + mutex_lock(&file_priv->prime.lock); /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ @@ -363,13 +363,13 @@ out_have_obj: fail_rm_handle: drm_prime_remove_buf_handle_locked(&file_priv->prime, dmabuf); + mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: /* clear NOT to be checked when releasing dma_buf */ obj->export_dma_buf = NULL; dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); - mutex_unlock(&file_priv->prime.lock); return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); -- cgit v1.2.3 From 84341c280acb8217a301344082c7ad8b9af870a6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:43 +0200 Subject: drm/prime: clarify logic a bit in drm_gem_prime_fd_to_handle if (!ret) implies that ret == 0, so no need to clear it again. And explicitly check for ret == 0 to indicate that we're checking an errno integer. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index cb0451679e81..3d576018893a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -444,10 +444,8 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); - if (!ret) { - ret = 0; + if (ret == 0) goto out_put; - } /* never seen this one, need to import */ obj = dev->driver->gem_prime_import(dev, dma_buf); -- cgit v1.2.3 From cd4f013f3a4b6a55d484cc2e206dc08e055e5291 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:44 +0200 Subject: drm/gem: switch dev->object_name_lock to a mutex I want to wrap the creation of a dma-buf from a gem object in it, so that the obj->export_dma_buf cache can be atomically filled in. Instead of creating a new mutex just for that variable I've figured I can reuse the existing dev->object_name_lock, especially since the new semantics will exactly mirror the flink obj->name already protected by that lock. v2: idr_preload/idr_preload_end is now an atomic section, so need to move the mutex locking outside. 
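The v2 note condenses to the following ordering rule, restated here as a fragment mirroring the hunk below rather than adding anything new: the mutex may sleep, so it has to be taken before idr_preload(), because idr_preload()/idr_preload_end() bracket a non-preemptible section in which only the spinlock and the GFP_NOWAIT allocation are allowed.

mutex_lock(&dev->object_name_lock);	/* sleeping lock: take it first */
idr_preload(GFP_KERNEL);		/* starts the atomic preload section */
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);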
[airlied: fix up conflict with patch to make debugfs use lock] Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 18 +++++++++--------- drivers/gpu/drm/drm_info.c | 4 ++-- include/drm/drmP.h | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 52548fd34b15..adb9eda4fa1a 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -93,7 +93,7 @@ drm_gem_init(struct drm_device *dev) { struct drm_gem_mm *mm; - spin_lock_init(&dev->object_name_lock); + mutex_init(&dev->object_name_lock); idr_init(&dev->object_name_idr); mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); @@ -243,10 +243,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) * checked for a name */ - spin_lock(&obj->dev->object_name_lock); + mutex_lock(&obj->dev->object_name_lock); if (--obj->handle_count == 0) drm_gem_object_handle_free(obj); - spin_unlock(&obj->dev->object_name_lock); + mutex_unlock(&obj->dev->object_name_lock); drm_gem_object_unreference_unlocked(obj); } @@ -324,16 +324,16 @@ drm_gem_handle_create(struct drm_file *file_priv, * Get the user-visible handle using idr. Preload and perform * allocation under our spinlock. */ + mutex_lock(&dev->object_name_lock); idr_preload(GFP_KERNEL); - spin_lock(&dev->object_name_lock); spin_lock(&file_priv->table_lock); ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); drm_gem_object_reference(obj); obj->handle_count++; spin_unlock(&file_priv->table_lock); - spin_unlock(&dev->object_name_lock); idr_preload_end(); + mutex_unlock(&dev->object_name_lock); if (ret < 0) { drm_gem_object_handle_unreference_unlocked(obj); return ret; @@ -578,8 +578,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOENT; + mutex_lock(&dev->object_name_lock); idr_preload(GFP_KERNEL); - spin_lock(&dev->object_name_lock); /* prevent races with concurrent gem_close. 
*/ if (obj->handle_count == 0) { ret = -ENOENT; @@ -601,8 +601,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, ret = 0; err: - spin_unlock(&dev->object_name_lock); idr_preload_end(); + mutex_unlock(&dev->object_name_lock); drm_gem_object_unreference_unlocked(obj); return ret; } @@ -625,11 +625,11 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - spin_lock(&dev->object_name_lock); + mutex_lock(&dev->object_name_lock); obj = idr_find(&dev->object_name_idr, (int) args->name); if (obj) drm_gem_object_reference(obj); - spin_unlock(&dev->object_name_lock); + mutex_unlock(&dev->object_name_lock); if (!obj) return -ENOENT; diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 5351e811c421..53298320080b 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -219,9 +219,9 @@ int drm_gem_name_info(struct seq_file *m, void *data) seq_printf(m, " name size handles refcount\n"); - spin_lock(&dev->object_name_lock); + mutex_lock(&dev->object_name_lock); idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); - spin_unlock(&dev->object_name_lock); + mutex_unlock(&dev->object_name_lock); return 0; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7782dbbbe126..bf058470a0fd 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1196,7 +1196,7 @@ struct drm_device { /** \name GEM information */ /*@{ */ - spinlock_t object_name_lock; + struct mutex object_name_lock; struct idr object_name_idr; /*@} */ int switch_power_state; -- cgit v1.2.3 From 20228c447846da9399ead53fdbbc8ab69b47788a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:45 +0200 Subject: drm/gem: completely close gem_open vs. gem_close races The gem flink name holds a reference onto the object itself, and this self-reference would prevent an flink'ed object from every being freed. To break that loop we remove the flink name when the last userspace handle disappears, i.e. when obj->handle_count reaches 0. Now in gem_open we drop the dev->object_name_lock between the flink name lookup and actually adding the handle. This means a concurrent gem_close of the last handle could result in the flink name getting reaped right inbetween, i.e. Thread 1 Thread 2 gem_open gem_close flink -> obj lookup handle_count drops to 0 remove flink name create_handle handle_count++ If someone now flinks this object again, we'll get a new flink name. We can close this race by removing the lock dropping and making the entire lookup+handle_create sequence atomic. Unfortunately to still be able to share the handle_create logic this requires a handle_create_tail function which drops the lock - we can't hold the object_name_lock while calling into a driver's ->gem_open callback. Note that for flink fixing this race isn't really important, since racing gem_open against gem_close is clearly a userspace bug. And no matter how the race ends, we won't leak any references. But with dma-buf where the userspace dma-buf fd itself is refcounted this is a valid sequence and hence we should fix it. Therefore this patch here is just a warm-up exercise (and for consistency between flink buffer sharing and dma-buf buffer sharing with self-imports). Also note that this extension of the critical section in gem_open protected by dev->object_name_lock only works because it's now a mutex: A spinlock would conflict with the potential memory allocation in idr_preload(). This is exercises by igt/gem_flink_race/flink_name. 
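Condensed, the closed race comes down to the open-by-name path below (error handling trimmed, mirroring the hunk that follows): the name lookup, the reference grab and the handle creation all run under one dev->object_name_lock critical section, with drm_gem_handle_create_tail() being the variant documented to drop that lock itself.

mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (!obj) {
	mutex_unlock(&dev->object_name_lock);
	return -ENOENT;
}
drm_gem_object_reference(obj);

/* drm_gem_handle_create_tail() drops dev->object_name_lock itself */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);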
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 42 +++++++++++++++++++++++++++++++----------- include/drm/drmP.h | 3 +++ 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index adb9eda4fa1a..d47aa774d64b 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -308,23 +308,26 @@ int drm_gem_dumb_destroy(struct drm_file *file, EXPORT_SYMBOL(drm_gem_dumb_destroy); /** - * Create a handle for this object. This adds a handle reference - * to the object, which includes a regular reference count. Callers - * will likely want to dereference the object afterwards. + * drm_gem_handle_create_tail - internal functions to create a handle + * + * This expects the dev->object_name_lock to be held already and will drop it + * before returning. Used to avoid races in establishing new handles when + * importing an object from either an flink name or a dma-buf. */ int -drm_gem_handle_create(struct drm_file *file_priv, - struct drm_gem_object *obj, - u32 *handlep) +drm_gem_handle_create_tail(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep) { struct drm_device *dev = obj->dev; int ret; + WARN_ON(!mutex_is_locked(&dev->object_name_lock)); + /* * Get the user-visible handle using idr. Preload and perform * allocation under our spinlock. */ - mutex_lock(&dev->object_name_lock); idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); @@ -351,6 +354,21 @@ drm_gem_handle_create(struct drm_file *file_priv, return 0; } + +/** + * Create a handle for this object. This adds a handle reference + * to the object, which includes a regular reference count. Callers + * will likely want to dereference the object afterwards. + */ +int +drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep) +{ + mutex_lock(&obj->dev->object_name_lock); + + return drm_gem_handle_create_tail(file_priv, obj, handlep); +} EXPORT_SYMBOL(drm_gem_handle_create); @@ -627,13 +645,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->object_name_lock); obj = idr_find(&dev->object_name_idr, (int) args->name); - if (obj) + if (obj) { drm_gem_object_reference(obj); - mutex_unlock(&dev->object_name_lock); - if (!obj) + } else { + mutex_unlock(&dev->object_name_lock); return -ENOENT; + } - ret = drm_gem_handle_create(file_priv, obj, &handle); + /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + ret = drm_gem_handle_create_tail(file_priv, obj, &handle); drm_gem_object_unreference_unlocked(obj); if (ret) return ret; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index bf058470a0fd..063eac31b97b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1575,6 +1575,9 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) } } +int drm_gem_handle_create_tail(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); -- cgit v1.2.3 From 319c933c71f3dbdb2b3274d1634d3494c70efa06 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:46 +0200 Subject: drm/prime: proper locking+refcounting for obj->dma_buf link The export dma-buf cache is semantically similar to an flink name. So semantically it makes sense to treat it the same and remove the name (i.e. the dma_buf pointer) and its references when the last gem handle disappears. 
Again we need to be careful, but doubly so: not only could an export race against a gem close ioctl (so we need to recheck obj->handle_count again when assigning the new name), but multiple exports can also race against each other. This is prevented by holding the dev->object_name_lock across the entire section which touches obj->dma_buf. With the new scheme we also need to reinstate the obj->dma_buf link at import time (in case the only reference userspace has held in-between was through the dma-buf fd and not through any native gem handle). For simplicity we don't check whether it's a native object but unconditionally set up that link - with the new scheme of removing the obj->dma_buf reference when the last handle disappears we can do that. To make it clear that this is not just for exported buffers anymore, also rename it from export_dma_buf to dma_buf. To make sure that no one can race a fd_to_handle or handle_to_fd with gem_close we use the same tricks as in flink of extending the dev->object_name_lock critical section. With this change we finally have a guaranteed 1:1 relationship (at least for native objects) between gem objects and dma-bufs, even accounting for races (which can happen since the dma-buf itself holds a reference while in-flight). This prevents igt/prime_self_import/export-vs-gem_close-race from Oopsing the kernel. There is still a leak though since the per-file priv dma-buf/handle cache handling is racy. That will be fixed in a later patch. v2: Remove the bogus dma_buf_put from the export_and_register_object failure path if we've raced with the handle count dropping to 0. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 1 + drivers/gpu/drm/drm_gem.c | 24 ++++++++++++++-- drivers/gpu/drm/drm_prime.c | 70 +++++++++++++++++++++++++++++++++++---------- include/drm/drmP.h | 12 ++++++-- 4 files changed, 87 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 59f459291093..2d2401e9c5ae 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -486,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&file_priv->prime); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d47aa774d64b..4b3c533be859 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) drm_prime_remove_buf_handle(&filp->prime, obj->import_attach->dmabuf); } - if (obj->export_dma_buf) { + + /* + * Note: obj->dma_buf can't disappear as long as we still hold a + * handle reference in obj->handle_count. + */ + if (obj->dma_buf) { drm_prime_remove_buf_handle(&filp->prime, - obj->export_dma_buf); + obj->dma_buf); } } @@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) } } +static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) +{ + /* Unbreak the reference cycle if we have an exported dma_buf.
*/ + if (obj->dma_buf) { + dma_buf_put(obj->dma_buf); + obj->dma_buf = NULL; + } +} + static void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { @@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) */ mutex_lock(&obj->dev->object_name_lock); - if (--obj->handle_count == 0) + if (--obj->handle_count == 0) { drm_gem_object_handle_free(obj); + drm_gem_object_exported_dma_buf_free(obj); + } mutex_unlock(&obj->dev->object_name_lock); drm_gem_object_unreference_unlocked(obj); @@ -712,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) void drm_gem_object_release(struct drm_gem_object *obj) { + WARN_ON(obj->dma_buf); + if (obj->filp) fput(obj->filp); } diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 3d576018893a..5e543e9264d7 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -193,11 +193,8 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; - if (obj->export_dma_buf == dma_buf) { - /* drop the reference on the export fd holds */ - obj->export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(obj); - } + /* drop the reference on the export fd holds */ + drm_gem_object_unreference_unlocked(obj); } EXPORT_SYMBOL(drm_gem_dmabuf_release); @@ -298,6 +295,37 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, } EXPORT_SYMBOL(drm_gem_prime_export); +static struct dma_buf *export_and_register_object(struct drm_device *dev, + struct drm_gem_object *obj, + uint32_t flags) +{ + struct dma_buf *dmabuf; + + /* prevent races with concurrent gem_close. */ + if (obj->handle_count == 0) { + dmabuf = ERR_PTR(-ENOENT); + return dmabuf; + } + + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(dmabuf)) { + /* normally the created dma-buf takes ownership of the ref, + * but if that fails then drop the ref + */ + return dmabuf; + } + + /* + * Note that callers do not need to clean up the export cache + * since the check for obj->handle_count guarantees that someone + * will clean it up. 
+ */ + obj->dma_buf = dmabuf; + get_dma_buf(obj->dma_buf); + + return dmabuf; +} + int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) @@ -313,15 +341,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); goto out_have_obj; } - if (obj->export_dma_buf) { - dmabuf = obj->export_dma_buf; + mutex_lock(&dev->object_name_lock); + if (obj->dma_buf) { + get_dma_buf(obj->dma_buf); + dmabuf = obj->dma_buf; + mutex_unlock(&dev->object_name_lock); goto out_have_obj; } - dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + dmabuf = export_and_register_object(dev, obj, flags); + mutex_unlock(&dev->object_name_lock); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref @@ -329,14 +362,13 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, ret = PTR_ERR(dmabuf); goto out; } - obj->export_dma_buf = dmabuf; mutex_lock(&file_priv->prime.lock); /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ ret = drm_prime_add_buf_handle(&file_priv->prime, - obj->export_dma_buf, handle); + dmabuf, handle); if (ret) goto fail_put_dmabuf; @@ -349,7 +381,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, return 0; out_have_obj: - get_dma_buf(dmabuf); ret = dma_buf_fd(dmabuf, flags); if (ret < 0) { dma_buf_put(dmabuf); @@ -365,8 +396,6 @@ fail_rm_handle: dmabuf); mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: - /* clear NOT to be checked when releasing dma_buf */ - obj->export_dma_buf = NULL; dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); @@ -448,13 +477,22 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, goto out_put; /* never seen this one, need to import */ + mutex_lock(&dev->object_name_lock); obj = dev->driver->gem_prime_import(dev, dma_buf); if (IS_ERR(obj)) { ret = PTR_ERR(obj); - goto out_put; + goto out_unlock; + } + + if (obj->dma_buf) { + WARN_ON(obj->dma_buf != dma_buf); + } else { + obj->dma_buf = dma_buf; + get_dma_buf(dma_buf); } - ret = drm_gem_handle_create(file_priv, obj, handle); + /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_unreference_unlocked(obj); if (ret) goto out_put; @@ -475,6 +513,8 @@ fail: * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); +out_unlock: + mutex_lock(&dev->object_name_lock); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 063eac31b97b..a95db49b3f9e 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -667,8 +667,16 @@ struct drm_gem_object { void *driver_private; - /* dma buf exported from this GEM object */ - struct dma_buf *export_dma_buf; + /** + * dma_buf - dma buf associated with this GEM object + * + * Pointer to the dma-buf associated with this gem object (either + * through importing or exporting). We break the resulting reference + * loop when the last gem handle for this object is released. 
+ * + * Protected by obj->object_name_lock + */ + struct dma_buf *dma_buf; /** * import_attach - dma buf attachment backing this object -- cgit v1.2.3 From 838cd4455ee1c76db06175d44319a8e7ac114b0e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:47 +0200 Subject: drm/prime: Simplify drm_gem_remove_prime_handles with the reworking semantics and locking of the obj->dma_buf pointer this pointer is always set as long as there's still a gem handle around and a dma_buf associated with this gem object. Also, the per file-priv lookup-cache for dma-buf importing is also unified between foreign and native objects. Hence we don't need to special case the clean any more and can simply drop the clause which only runs for foreing objects, i.e. with obj->import_attach set. Note that with this change (actually with the previous one to always set up obj->dma_buf even for foreign objects) it is no longer required to set obj->import_attach when importing a foreing object. So update comments accordingly, too. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 5 ----- include/drm/drmP.h | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4b3c533be859..0a5a0ca0a52e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -191,11 +191,6 @@ EXPORT_SYMBOL(drm_gem_object_alloc); static void drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) { - if (obj->import_attach) { - drm_prime_remove_buf_handle(&filp->prime, - obj->import_attach->dmabuf); - } - /* * Note: obj->dma_buf can't disappear as long as we still hold a * handle reference in obj->handle_count. diff --git a/include/drm/drmP.h b/include/drm/drmP.h index a95db49b3f9e..ce1e6bd30306 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -687,6 +687,11 @@ struct drm_gem_object { * * The driver's ->gem_free_object callback is responsible for cleaning * up the dma_buf attachment and references acquired at import time. + * + * Note that the drm gem/prime core does not depend upon drivers setting + * this field any more. So for drivers where this doesn't make sense + * (e.g. virtual devices or a displaylink behind an usb bus) they can + * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; }; -- cgit v1.2.3 From de9564d8b9e69bf6603521e810d3cb46fa98ad81 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:48 +0200 Subject: drm/prime: make drm_prime_lookup_buf_handle static ... and move it to the top of the function to avoid a forward declaration. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 29 +++++++++++++++-------------- include/drm/drmP.h | 1 - 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 5e543e9264d7..ed1ea5c1a9ca 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -83,6 +83,21 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, return 0; } +static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf, + uint32_t *handle) +{ + struct drm_prime_member *member; + + list_for_each_entry(member, &prime_fpriv->head, entry) { + if (member->dma_buf == dma_buf) { + *handle = member->handle; + return 0; + } + } + return -ENOENT; +} + static int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev, struct dma_buf_attachment *attach) @@ -655,20 +670,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) } EXPORT_SYMBOL(drm_prime_destroy_file_private); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) -{ - struct drm_prime_member *member; - - list_for_each_entry(member, &prime_fpriv->head, entry) { - if (member->dma_buf == dma_buf) { - *handle = member->handle; - return 0; - } - } - return -ENOENT; -} -EXPORT_SYMBOL(drm_prime_lookup_buf_handle); - void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) { mutex_lock(&prime_fpriv->lock); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index ce1e6bd30306..5914cc5c3fa6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1508,7 +1508,6 @@ int drm_gem_dumb_destroy(struct drm_file *file, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); #if DRM_DEBUG_CODE -- cgit v1.2.3 From d0b2c5334f41bdd18adaa3fbc1f7b5f1daab7eac Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 15 Aug 2013 00:02:49 +0200 Subject: drm/prime: Always add exported buffers to the handle cache ... not only when the dma-buf is freshly created. In contrived examples someone else could have exported/imported the dma-buf already and handed us the gem object with a flink name. If such on object gets reexported as a dma_buf we won't have it in the handle cache already, which breaks the guarantee that for dma-buf imports we always hand back an existing handle if there is one. This is exercised by igt/prime_self_import/with_one_bo_two_files Now if we extend the locked sections just a notch more we can also plug th racy buf/handle cache setup in handle_to_fd: If evil userspace races a concurrent gem close against a prime export operation we can end up tearing down the gem handle before the dma buf handle cache is set up. 
When handle_to_fd gets around to adding the handle to the cache there will be no one left to clean it up, effectily leaking the bo (and the dma-buf, since the handle cache holds a ref on the dma-buf): Thread A Thread B handle_to_fd: lookup gem object from handle creates new dma_buf gem_close on the same handle obj->dma_buf is set, but file priv buf handle cache has no entry obj->handle_count drops to 0 drm_prime_add_buf_handle sets up the handle cache -> We have a dma-buf reference in the handle cache, but since the handle_count of the gem object already dropped to 0 no on will clean it up. When closing the drm device fd we'll hit the WARN_ON in drm_prime_destroy_file_private. The important change is to extend the critical section of the filp->prime.lock to cover the gem handle lookup. This serializes with a concurrent gem handle close. This leak is exercised by igt/prime_self_import/export-vs-gem_close-race Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 6 ++-- drivers/gpu/drm/drm_prime.c | 81 +++++++++++++++++++++++++++------------------ include/drm/drmP.h | 2 +- 3 files changed, 53 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 0a5a0ca0a52e..1ce88c3301a1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -195,10 +195,12 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) * Note: obj->dma_buf can't disappear as long as we still hold a * handle reference in obj->handle_count. */ + mutex_lock(&filp->prime.lock); if (obj->dma_buf) { - drm_prime_remove_buf_handle(&filp->prime, - obj->dma_buf); + drm_prime_remove_buf_handle_locked(&filp->prime, + obj->dma_buf); } + mutex_unlock(&filp->prime.lock); } static void drm_gem_object_ref_bug(struct kref *list_kref) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index ed1ea5c1a9ca..7ae2bfcab70e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -83,6 +83,19 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, return 0; } +static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle) +{ + struct drm_prime_member *member; + + list_for_each_entry(member, &prime_fpriv->head, entry) { + if (member->handle == handle) + return member->dma_buf; + } + + return NULL; +} + static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) @@ -146,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf, attach->priv = NULL; } -static void drm_prime_remove_buf_handle_locked( - struct drm_prime_file_private *prime_fpriv, - struct dma_buf *dma_buf) +void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf) { struct drm_prime_member *member, *safe; @@ -337,6 +349,8 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, */ obj->dma_buf = dmabuf; get_dma_buf(obj->dma_buf); + /* Grab a new ref since the callers is now used by the dma-buf */ + drm_gem_object_reference(obj); return dmabuf; } @@ -349,10 +363,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, int ret = 0; struct dma_buf *dmabuf; + mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; + if (!obj) { + ret = -ENOENT; + goto out_unlock; + } + + dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); + 
if (dmabuf) { + get_dma_buf(dmabuf); + goto out_have_handle; + } + mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; @@ -360,45 +384,45 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, goto out_have_obj; } - mutex_lock(&dev->object_name_lock); if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; - mutex_unlock(&dev->object_name_lock); goto out_have_obj; } dmabuf = export_and_register_object(dev, obj, flags); - mutex_unlock(&dev->object_name_lock); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ ret = PTR_ERR(dmabuf); + mutex_unlock(&dev->object_name_lock); goto out; } - mutex_lock(&file_priv->prime.lock); - /* if we've exported this buffer the cheat and add it to the import list - * so we get the correct handle back +out_have_obj: + /* + * If we've exported this buffer then cheat and add it to the import list + * so we get the correct handle back. We must do this under the + * protection of dev->object_name_lock to ensure that a racing gem close + * ioctl doesn't miss to remove this buffer handle from the cache. */ ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); + mutex_unlock(&dev->object_name_lock); if (ret) goto fail_put_dmabuf; +out_have_handle: ret = dma_buf_fd(dmabuf, flags); - if (ret < 0) - goto fail_rm_handle; - - *prime_fd = ret; - mutex_unlock(&file_priv->prime.lock); - return 0; - -out_have_obj: - ret = dma_buf_fd(dmabuf, flags); + /* + * We must _not_ remove the buffer from the handle cache since the newly + * created dma buf is already linked in the global obj->dma_buf pointer, + * and that is invariant as long as a userspace gem handle exists. + * Closing the handle will clean out the cache anyway, so we don't leak. 
+ */ if (ret < 0) { - dma_buf_put(dmabuf); + goto fail_put_dmabuf; } else { *prime_fd = ret; ret = 0; @@ -406,14 +430,13 @@ out_have_obj: goto out; -fail_rm_handle: - drm_prime_remove_buf_handle_locked(&file_priv->prime, - dmabuf); - mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); +out_unlock: + mutex_unlock(&file_priv->prime.lock); + return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); @@ -669,11 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) WARN_ON(!list_empty(&prime_fpriv->head)); } EXPORT_SYMBOL(drm_prime_destroy_file_private); - -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) -{ - mutex_lock(&prime_fpriv->lock); - drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf); - mutex_unlock(&prime_fpriv->lock); -} -EXPORT_SYMBOL(drm_prime_remove_buf_handle); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5914cc5c3fa6..90833dccc919 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1508,7 +1508,7 @@ int drm_gem_dumb_destroy(struct drm_file *file, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); +void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); #if DRM_DEBUG_CODE extern int drm_vma_info(struct seq_file *m, void *data); -- cgit v1.2.3 From 000433b67e46771a7c08f78574943855a98c53ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 8 Aug 2013 14:41:09 +0100 Subject: drm/i915: Only do a chipset flush after a clflush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we skip clflushes more often, return a boolean indicating whether the clflush was actually performed, and only if it was do the chipset flush. (Though on most of the architectures where the clflush will be skipped, the chipset flush is a no-op!) 
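Callers then follow a pattern along the lines of this sketch (condensed from the execbuffer hunk below; not the exact kernel code):

	bool flush_chipset = false;

	list_for_each_entry(obj, objects, exec_list) {
		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);
	}

	/* only pay for the chipset flush if something was actually clflushed */
	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);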
Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 20 +++++++++++--------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5 +++-- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 34d3f2fae8ac..1af59d72ddc7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1841,7 +1841,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error) } void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 54d76e9392d8..959dffba0f0e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -841,8 +841,8 @@ out: */ if (!needs_clflush_after && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj, obj->pin_display); - i915_gem_chipset_flush(dev); + if (i915_gem_clflush_object(obj, obj->pin_display)) + i915_gem_chipset_flush(dev); } } @@ -3224,7 +3224,7 @@ err_unpin: return ret; } -void +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force) { @@ -3233,14 +3233,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, * again at bind time. */ if (obj->pages == NULL) - return; + return false; /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. */ if (obj->stolen) - return; + return false; /* If the GPU is snooping the contents of the CPU cache, * we do not need to manually clear the CPU cache lines. However, @@ -3251,11 +3251,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, * tracking. */ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) - return; + return false; trace_i915_gem_object_clflush(obj); - drm_clflush_sg(obj->pages); + + return true; } /** Flushes the GTT write domain for the object if it's dirty. 
*/ @@ -3295,8 +3296,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; - i915_gem_clflush_object(obj, force); - i915_gem_chipset_flush(obj->base.dev); + if (i915_gem_clflush_object(obj, force)) + i915_gem_chipset_flush(obj->base.dev); + old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e999578a021c..7dcf78cf6781 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -708,6 +708,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, { struct drm_i915_gem_object *obj; uint32_t flush_domains = 0; + bool flush_chipset = false; int ret; list_for_each_entry(obj, objects, exec_list) { @@ -716,12 +717,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj, false); + flush_chipset |= i915_gem_clflush_object(obj, false); flush_domains |= obj->base.write_domain; } - if (flush_domains & I915_GEM_DOMAIN_CPU) + if (flush_chipset) i915_gem_chipset_flush(ring->dev); if (flush_domains & I915_GEM_DOMAIN_GTT) -- cgit v1.2.3 From 35c7ab421a13f8327e3fd627c6ebafb1c13b2e55 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 10 Aug 2013 14:51:11 +0200 Subject: drm/i915: reserve I915_CACHING_DISPLAY and document cache modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolve the catch-22 of igt needing a stable number and patches first needing testcases by reserving the interface number up-front. v2: Improve the spelling a bit. v3: More spelling fail spotted by Chris. Requested-by: Chris Wilson Cc: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- include/uapi/drm/i915_drm.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index a1a7b6bd60d8..0bb3e5524382 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -768,8 +768,32 @@ struct drm_i915_gem_busy { __u32 busy; }; +/** + * I915_CACHING_NONE + * + * GPU access is not coherent with cpu caches. Default for machines without an + * LLC. + */ #define I915_CACHING_NONE 0 +/** + * I915_CACHING_CACHED + * + * GPU access is coherent with cpu caches and furthermore the data is cached in + * last-level caches shared between cpu cores and the gpu GT. Default on + * machines with HAS_LLC. + */ #define I915_CACHING_CACHED 1 +/** + * I915_CACHING_DISPLAY + * + * Special GPU caching mode which is coherent with the scanout engines. + * Transparently falls back to I915_CACHING_NONE on platforms where no special + * cache mode (like write-through or gfdt flushing) is available. The kernel + * automatically sets this mode when using a buffer as a scanout target. + * Userspace can manually set this mode to avoid a costly stall and clflush in + * the hotpath of drawing the first frame. + */ +#define I915_CACHING_DISPLAY 2 struct drm_i915_gem_caching { /** -- cgit v1.2.3 From 7ace7ef2f5d20632240196fa3e5d5c74cf2c1508 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 9 Aug 2013 22:12:12 -0700 Subject: drm/i915: WARN_ON failed map_and_fenceable I just noticed in our code we don't really check the assertion, and given some of the code I am changing in this area, I feel a WARN is very nice to have. 
Signed-off-by: Ben Widawsky [danvet: s/&/&&/ to fix typo on the check.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 959dffba0f0e..e414adaaf11d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3211,6 +3211,8 @@ search_free: if (i915_is_ggtt(vm)) obj->map_and_fenceable = mappable && fenceable; + WARN_ON(map_and_fenceable && !obj->map_and_fenceable); + trace_i915_vma_bind(vma, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; -- cgit v1.2.3 From 1ad87e72b54cf3b698c002f0a31ac34d6b407f0b Mon Sep 17 00:00:00 2001 From: Guillaume Clement Date: Sat, 10 Aug 2013 21:57:57 +0200 Subject: i915: Fix SDVO potentially turning off randomly Some Poulsbo cards seem to incorrectly report SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED instead of SDVO_CMD_STATUS_PENDING, which causes the display to be turned off. This could also happen to i915. Signed-off-by: Guillaume Clement Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 02f220b4e4a1..317e058fb3cf 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -538,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, &status)) goto log_fail; - while (status == SDVO_CMD_STATUS_PENDING && --retry) { + while ((status == SDVO_CMD_STATUS_PENDING || + status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { if (retry < 10) msleep(15); else -- cgit v1.2.3 From c8b5018b22bc03941fbc6dcf7bea5e8344a8f3da Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Sun, 11 Aug 2013 12:44:26 +0300 Subject: drm/i915: remove unused leftover variable irq_received It's been there since i8xx_irq_handler() was added in commit c2798b19bac2538393fc932bfbe59807a4734b3e Author: Chris Wilson Date: Sun Apr 22 21:13:57 2012 +0100 drm/i915: i8xx interrupt handler Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8a77faf4927d..34467ed4f9da 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2403,7 +2403,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) u16 iir, new_iir; u32 pipe_stats[2]; unsigned long irqflags; - int irq_received; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -2437,7 +2436,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -- cgit v1.2.3 From f2f4d82faf85d2e53a2ba00a831a9f7f80b7e6e7 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Sun, 11 Aug 2013 12:44:01 +0300 Subject: drm/i915: give more distinctive names to ring hangcheck action enums The short lowercase names are bound to collide. The default warnings don't even warn about shadowing. 
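As a hypothetical reduction (not taken from the kernel sources), this is the kind of silent shadowing the prefixed names guard against; gcc only complains about it with -Wshadow, which a normal kernel build does not enable:

	enum hangcheck_action { wait, active, kick, hung };	/* old-style names */

	int pick_timeout(void)
	{
		int wait = 10;	/* silently shadows the enumerator 'wait' */

		return wait;	/* refers to the local, not to the enum value */
	}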
Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 22 +++++++++++----------- drivers/gpu/drm/i915/intel_ringbuffer.h | 7 ++++++- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e414adaaf11d..474748ffa7b6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2214,7 +2214,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, offset = i915_gem_obj_offset(request->batch_obj, request_to_vm(request)); - if (ring->hangcheck.action != wait && + if (ring->hangcheck.action != HANGCHECK_WAIT && i915_request_guilty(request, acthd, &inside)) { DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", ring->name, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 34467ed4f9da..06659a72d0cf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1826,10 +1826,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) u32 tmp; if (ring->hangcheck.acthd != acthd) - return active; + return HANGCHECK_ACTIVE; if (IS_GEN2(dev)) - return hung; + return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? * If so we can simply poke the RB_WAIT bit @@ -1841,24 +1841,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) DRM_ERROR("Kicking stuck wait on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); - return kick; + return HANGCHECK_KICK; } if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(ring)) { default: - return hung; + return HANGCHECK_HUNG; case 1: DRM_ERROR("Kicking stuck semaphore on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); - return kick; + return HANGCHECK_KICK; case 0: - return wait; + return HANGCHECK_WAIT; } } - return hung; + return HANGCHECK_HUNG; } /** @@ -1926,16 +1926,16 @@ static void i915_hangcheck_elapsed(unsigned long data) acthd); switch (ring->hangcheck.action) { - case wait: + case HANGCHECK_WAIT: score = 0; break; - case active: + case HANGCHECK_ACTIVE: score = BUSY; break; - case kick: + case HANGCHECK_KICK: score = KICK; break; - case hung: + case HANGCHECK_HUNG: score = HUNG; stuck[i] = true; break; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6e38256d41e1..5e6be842d225 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -37,7 +37,12 @@ struct intel_hw_status_page { #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) -enum intel_ring_hangcheck_action { wait, active, kick, hung }; +enum intel_ring_hangcheck_action { + HANGCHECK_WAIT, + HANGCHECK_ACTIVE, + HANGCHECK_KICK, + HANGCHECK_HUNG, +}; struct intel_ring_hangcheck { bool deadlock; -- cgit v1.2.3 From ea04cb31d506ac3f4fc3cefb1c50eb4f35ab37fd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Sun, 11 Aug 2013 12:44:02 +0300 Subject: drm/i915: drop unnecessary local variable to suppress build warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although I could not reproduce this (different compiler version, perhaps), reportedly we get: drivers/gpu/drm/i915/i915_irq.c:1943:27: warning: ‘score’ may be used uninitialized in this function [-Wuninitialized] Drop the 'score' variable altogether as it's not really needed. 
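The shape gcc trips over is roughly this reduction of i915_hangcheck_elapsed (illustrative only): a local that is assigned only inside a switch with no default arm, so the compiler cannot always prove it is initialized before being consumed. Accumulating directly into ring->hangcheck.score removes the local altogether:

	int score;

	switch (ring->hangcheck.action) {
	case HANGCHECK_WAIT:
		score = 0;
		break;
	case HANGCHECK_ACTIVE:
		score = BUSY;
		break;
	case HANGCHECK_KICK:
		score = KICK;
		break;
	case HANGCHECK_HUNG:
		score = HUNG;
		break;
	}	/* no default: arm */

	ring->hangcheck.score += score;	/* "'score' may be used uninitialized" */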
Reported-by: Kees Cook Signed-off-by: Jani Nikula Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 06659a72d0cf..28d57477aa42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1905,8 +1905,6 @@ static void i915_hangcheck_elapsed(unsigned long data) } else busy = false; } else { - int score; - /* We always increment the hangcheck score * if the ring is busy and still processing * the same request, so that no single request @@ -1927,20 +1925,18 @@ static void i915_hangcheck_elapsed(unsigned long data) switch (ring->hangcheck.action) { case HANGCHECK_WAIT: - score = 0; break; case HANGCHECK_ACTIVE: - score = BUSY; + ring->hangcheck.score += BUSY; break; case HANGCHECK_KICK: - score = KICK; + ring->hangcheck.score += KICK; break; case HANGCHECK_HUNG: - score = HUNG; + ring->hangcheck.score += HUNG; stuck[i] = true; break; } - ring->hangcheck.score += score; } } else { /* Gradually reduce the count so that we catch DoS -- cgit v1.2.3 From 651d794fae9b79237aae1c97f8a9d9f3817bd31d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 8 Aug 2013 14:41:10 +0100 Subject: drm/i915: Use Write-Through cacheing for the display plane on Iris MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Haswell GT3e has the unique feature of supporting Write-Through cacheing of objects within the eLLC/LLC. The purpose of this is to enable the display plane to remain coherent whilst objects lie resident in the eLLC/LLC - so that we, in theory, get the best of both worlds, perfect display and fast access. However, we still need to be careful as the CPU does not see the WT when accessing the cache. In particular, this means that we need to flush the cache lines after writing to an object through the CPU, and on transitioning from a cached state to WT. v2: Actually do the clflush on transition to WT, nagging by Ville. v3: Flush the CPU cache after writes into WT objects. v4: Rease onto LLC updates and report WT as "uncached" for get_cache_level_ioctl to remain symmetric with set_cache_level_ioctl. Signed-off-by: Chris Wilson Cc: Ville Syrjälä Cc: Kenneth Graunke Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 4 +++- drivers/gpu/drm/i915/i915_gem.c | 14 ++++++++++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 11 ++++++++++- include/uapi/drm/i915_drm.h | 1 + 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ce098c3ccc00..f4231185ec7d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_LLC: value = HAS_LLC(dev); break; + case I915_PARAM_HAS_WT: + value = HAS_WT(dev); + break; case I915_PARAM_HAS_ALIASING_PPGTT: value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1af59d72ddc7..6d07467d0e7e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -454,6 +454,7 @@ enum i915_cache_level { caches, eg sampler/render caches, and the large Last-Level-Cache. LLC is coherent with the CPU, but L3 is only visible to the GPU. 
*/ + I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ }; typedef uint32_t gen6_gtt_pte_t; @@ -1385,7 +1386,7 @@ struct drm_i915_gem_object { unsigned int pending_fenced_gpu_access:1; unsigned int fenced_gpu_access:1; - unsigned int cache_level:2; + unsigned int cache_level:3; unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; @@ -1530,6 +1531,7 @@ struct drm_i915_file_private { #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) +#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 474748ffa7b6..4064fdf15abe 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3471,7 +3471,16 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, goto unlock; } - args->caching = obj->cache_level != I915_CACHE_NONE; + switch (obj->cache_level) { + case I915_CACHE_LLC: + case I915_CACHE_L3_LLC: + args->caching = I915_CACHING_CACHED; + break; + + default: + args->caching = I915_CACHING_NONE; + break; + } drm_gem_object_unreference(&obj->base); unlock: @@ -3565,7 +3574,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * of uncaching, which would allow us to flush all the LLC-cached data * with that bit in the PTE to main memory with just one PIPE_CONTROL. */ - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); + ret = i915_gem_object_set_cache_level(obj, + HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); if (ret) goto err_unpin_display; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c9420c280cf0..212f6d8c35ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -55,6 +55,7 @@ #define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) +#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, enum i915_cache_level level) @@ -138,8 +139,16 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, gen6_gtt_pte_t pte = GEN6_PTE_VALID; pte |= HSW_PTE_ADDR_ENCODE(addr); - if (level != I915_CACHE_NONE) + switch (level) { + case I915_CACHE_NONE: + break; + case I915_CACHE_WT: + pte |= HSW_WT_ELLC_LLC_AGE0; + break; + default: pte |= HSW_WB_ELLC_LLC_AGE0; + break; + } return pte; } diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 0bb3e5524382..55bb5729bd78 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -334,6 +334,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_PINNED_BATCHES 24 #define I915_PARAM_HAS_EXEC_NO_RELOC 25 #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 +#define I915_PARAM_HAS_WT 27 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3 From 4257d3ba3b87a84adb2f840620cb63512f0bab22 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 8 Aug 2013 14:41:11 +0100 Subject: drm/i915: Allow the user to set bo into the DISPLAY cache domain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is primarily for the benefit of the create2 ioctl so that the caller can avoid the later step of rebinding the bo with 
new PTE bits. After introducing WT (and possibly GFDT) cacheing for display targets, not everything in the display is earmarked as UC, and more importantly what is is controlled by the kernel. Note that set_cache_level/get_cache_level for DISPLAY is not necessarily idempotent; get_cache_level may return UC for architectures that have no special cache domain for the display engine. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4064fdf15abe..5f48ecc77ec2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3477,6 +3477,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, args->caching = I915_CACHING_CACHED; break; + case I915_CACHE_WT: + args->caching = I915_CACHING_DISPLAY; + break; + default: args->caching = I915_CACHING_NONE; break; @@ -3503,6 +3507,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, case I915_CACHING_CACHED: level = I915_CACHE_LLC; break; + case I915_CACHING_DISPLAY: + level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE; + break; default: return -EINVAL; } -- cgit v1.2.3 From f3f08572fc245bc0cf5f102473ce0f54e693831d Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 12 Aug 2013 14:56:53 -0300 Subject: drm/i915: remove set but unused variables Caught by "make W=1 drivers/gpu/drm/i915/". Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 12 ++---------- drivers/gpu/drm/i915/intel_dp.c | 3 --- drivers/gpu/drm/i915/intel_hdmi.c | 2 -- 3 files changed, 2 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bafdc3e21e87..d2dec3ff308a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -690,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, { u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; u32 m, n, fastclk; - u32 updrate, minupdate, fracbits, p; + u32 updrate, minupdate, p; unsigned long bestppm, ppm, absppm; int dotclk, flag; @@ -701,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, fastclk = dotclk / (2*100); updrate = 0; minupdate = 19200; - fracbits = 1; n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; bestm1 = bestm2 = bestp1 = bestp2 = 0; @@ -4423,13 +4422,10 @@ static void vlv_update_pll(struct intel_crtc *crtc) int pipe = crtc->pipe; u32 dpll, mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; - bool is_hdmi; u32 coreclk, reg_val, dpll_md; mutex_lock(&dev_priv->dpio_lock); - is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); - bestn = crtc->config.dpll.n; bestm1 = crtc->config.dpll.m1; bestm2 = crtc->config.dpll.m2; @@ -8894,14 +8890,13 @@ intel_modeset_stage_output_state(struct drm_device *dev, struct drm_crtc *new_crtc; struct intel_connector *connector; struct intel_encoder *encoder; - int count, ro; + int ro; /* The upper layers ensure that we either disable a crtc or have a list * of connectors. For paranoia, double-check this. 
*/ WARN_ON(!set->fb && (set->num_connectors != 0)); WARN_ON(set->fb && (set->num_connectors == 0)); - count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { /* Otherwise traverse passed in connector list and get encoders @@ -8935,7 +8930,6 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* connector->new_encoder is now updated for all connectors. */ /* Update crtc of enabled connectors. */ - count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { if (!connector->new_encoder) @@ -10295,7 +10289,6 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; /* * Interrupts and polling as the first thing to avoid creating havoc. @@ -10319,7 +10312,6 @@ void intel_modeset_cleanup(struct drm_device *dev) if (!crtc->fb) continue; - intel_crtc = to_intel_crtc(crtc); intel_increase_pllclock(crtc); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 63b6722d4285..2726d4d41722 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2326,7 +2326,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) struct drm_device *dev = encoder->dev; int i; uint8_t voltage; - bool clock_recovery = false; int voltage_tries, loop_tries; uint32_t DP = intel_dp->DP; @@ -2344,7 +2343,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) voltage = 0xff; voltage_tries = 0; loop_tries = 0; - clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint8_t link_status[DP_LINK_STATUS_SIZE]; @@ -2365,7 +2363,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { DRM_DEBUG_KMS("clock recovery OK\n"); - clock_recovery = true; break; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 88562913fb7f..94179fdf61f5 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1232,7 +1232,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) { struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; struct intel_connector *intel_connector; intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); @@ -1246,7 +1245,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) } intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); -- cgit v1.2.3 From a1d95703b7fa5cbc4abf53f63df51c49cbacc7b6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 13 Aug 2013 18:48:47 +0100 Subject: drm/i915: Print the changes required for modeset After computing the stage changes for the set_config, record those in the debug log. 
Signed-off-by: Chris Wilson Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d2dec3ff308a..a87bb93a0f63 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8880,6 +8880,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, drm_mode_debug_printmodeline(set->mode); config->mode_changed = true; } + + DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n", + set->crtc->base.id, config->mode_changed, config->fb_changed); } static int -- cgit v1.2.3 From ed1c9e2cf414e32cb7ea1217b51b39e70fc132d2 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 12 Aug 2013 14:34:08 -0300 Subject: drm/i915: print a message when we detect an early Haswell SDV The machines that fall in this category are the SDVs that have a PCI ID starting with 0x0C. These are very early pre-production machines and may not fully work. Other Haswell SDVs have PCI IDs that match the real Haswell machines and we expect them to work better. Even though they have problems, they still mostly work so I don't see a reason to refuse loading our driver. But I do see a reason to reject bug reports from these machines, so the message should help the bug triagers. As far as I know, we don't implement some workarounds that are specific to these machines and suspend/resume may not work on most of them, but besides this, they may work. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=61508 Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 8 ++++++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f4231185ec7d..d4c176b7d76e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1488,6 +1488,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) i915_dump_device_info(dev_priv); + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev)) + DRM_INFO("This is an early pre-production Haswell machine. 
" + "It may not be fully functional.\n"); + if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6d07467d0e7e..2e7d5f9524f7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1511,6 +1511,8 @@ struct drm_i915_file_private { #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ + ((dev)->pci_device & 0xFF00) == 0x0C00) #define IS_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pci_device & 0xFF00) == 0x0A00) -- cgit v1.2.3 From 351aa5666d02062b52329bcfe4bcf9d1f882fba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Marchesin?= Date: Tue, 13 Aug 2013 11:55:17 -0700 Subject: drm/i915: tune the RC6 threshold for stability MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's basically the same deal as the RC6+ issues on ivy bridge except this time with RC6 on sandy bridge. Like last time the core of the issue is that the timings don't work 100% with our voltage regulator. So from time to time, the kernel will print a warning message about the GPU not getting out of RC6. In particular, I found this fairly easy to reproduce during suspend/resume. Changing the threshold to 125000 instead of 50000 seems to fix the issue. The previous patch used 150000 but as it turns out this doesn't work everywhere. After getting such a machine, I bisected the highest value which works, which is 125000, so here it is. I also measured the idle power usage before/after this patch and didn't see a difference on a sandy bridge laptop. On haswell and up, it makes a big difference, so we want to keep it at 50k there. It also seems like haswell doesn't have the RC6 issues that sandy bridge has so the 50k value is fine. Signed-off-by: Stéphane Marchesin Acked-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3ac5fe9d428a..76150818b5cb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3508,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) + I915_WRITE(GEN6_RC6_THRESHOLD, 125000); + else + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ -- cgit v1.2.3 From 5020150b3b8d2912466e28572f25b3cc56722aec Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 12 Aug 2013 16:53:03 -0700 Subject: drm/i915: Initialize seqno for VECS too We require n-1 mailboxes for proper semaphore synchronization. All semaphore synchronization code relies on proper values in these mailboxes. The fact that we failed to touch the vebox ring by itself was unlikely to be an issue since the HW should be initializing the values to 0. However the error framework for testing seqno wrap introduced by Mika, in addition to the hangcheck via seqno, and i915_error_first_batchbuffer() combined caused a nice explosion. The problem is caused by seqno wrap because the wrap condition is not properly setup. 
The wrap code attempts to set the sync mailboxes all to 0, and then set the current seqno to one less than 0. In all cases, the vebox mailbox wasn't properly being initialized. This caused a wrap to not occur. When hangcheck kicks in with the bogus seqno values, the rest just doesn't work. It makes me wonder if we shouldn't consider a dumber version of hangcheck... How we messed this up: VECS support was written before the aforementioned other features. Upon VECS being rebased, these facts were missed. Cc: Mika Kuoppala Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=65387 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67198 Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 74d02a704515..34777168f700 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1594,6 +1594,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) if (INTEL_INFO(ring->dev)->gen >= 6) { I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); + if (HAS_VEBOX(ring->dev)) + I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); } ring->set_seqno(ring, seqno); -- cgit v1.2.3 From 4e5aabfd3106cfd68694db416e271996aadf114a Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 12 Aug 2013 16:53:04 -0700 Subject: drm/i915: Get VECS semaphore info on error Ideally we could use for_each_ring with the ring flags as I've done a couple times (http://lists.freedesktop.org/archives/intel-gfx/2013-June/029450.html). Until Daniel merges that patch though, we can just use this. Cc: Mika Kuoppala Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gpu_error.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 60393cb9a7c7..558e568d5b45 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -243,6 +243,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", error->semaphore_mboxes[ring][1], error->semaphore_seqno[ring][1]); + if (HAS_VEBOX(dev)) { + err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][2], + error->semaphore_seqno[ring][2]); + } } err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); @@ -682,6 +687,12 @@ static void i915_record_ring_state(struct drm_device *dev, error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; } + if (HAS_VEBOX(dev)) { + error->semaphore_mboxes[ring->id][2] = + I915_READ(RING_SYNC_2(ring->mmio_base)); + error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; + } + if (INTEL_INFO(dev)->gen >= 4) { error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); -- cgit v1.2.3 From 4a025e26a2979193739f46e391ffc05cf0637d90 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 14 Aug 2013 10:01:32 +0200 Subject: drm/i915: clarify error paths in create_stolen_for_preallocated Use the standard inversely ordered goto label stack for everything. Spotted while reviewing place where we might need to to call vma_destroy but failed to do so. 
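The pattern being converged on is the usual inversely ordered goto-label unwind. A generic, self-contained sketch of it (step_a/step_b/step_c and undo_a/undo_b are purely hypothetical helpers, not drm functions):

	int step_a(void); int step_b(void); int step_c(void);
	void undo_a(void); void undo_b(void);

	int setup_example(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			goto err_out;

		ret = step_b();
		if (ret)
			goto err_a;

		ret = step_c();
		if (ret)
			goto err_b;

		return 0;

	err_b:	/* labels unwind in the reverse order of setup */
		undo_b();
	err_a:
		undo_a();
	err_out:
		return ret;
	}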
Cc: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_stolen.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index e20d64966c72..7f4c510a751b 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -392,8 +392,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - i915_gem_vma_destroy(vma); - goto err_out; + goto err_vma; } } @@ -404,6 +403,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, return obj; +err_vma: + i915_gem_vma_destroy(vma); err_out: drm_mm_put_block(stolen); drm_gem_object_unreference(&obj->base); -- cgit v1.2.3 From 433544bd25b06cb6dcdb79b6da8d748a0220898e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 13 Aug 2013 18:09:06 -0700 Subject: drm/i915: Remove node only when allocated VMAs can be created and not bound. One may think of it as lazy cleanup, and safely gloss over the conditions which manufacture it. In either case, when the object backing the i915 vma is destroyed, we must cleanup the vma without stumbling into a bunch of pitfalls that assume the vma is bound. NOTE: I was pretty certain the above condition could only happen when we introduced the use of VMAs being looked up at execbuf, and already existing. Paulo has hit this though, so I must be missing something. As I believe the patch is correct anyway, therefore I won't scratch my head too hard. v2: use goto destroy as a compromise (Chris) Cc: Chris Wilson Cc: Paulo Zanoni Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5f48ecc77ec2..910fbaff0e99 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2619,6 +2619,9 @@ int i915_vma_unbind(struct i915_vma *vma) if (list_empty(&vma->vma_link)) return 0; + if (!drm_mm_node_allocated(&vma->node)) + goto destroy; + if (obj->pin_count) return -EBUSY; @@ -2656,6 +2659,8 @@ int i915_vma_unbind(struct i915_vma *vma) obj->map_and_fenceable = true; drm_mm_remove_node(&vma->node); + +destroy: i915_gem_vma_destroy(vma); /* Since the unbound list is global, only move to that list if -- cgit v1.2.3 From 4bd561b3e8d7d2407cf465cb79c51a1ff1264343 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 13 Aug 2013 18:09:07 -0700 Subject: drm/i915: cleanup map&fence in bind Cleanup the map and fenceable setting during bind to make more sense, and not check i915_is_ggtt() 2 unnecessary times v2: Move the bools into the if block (Chris) - There are ways to tidy this function (fence calculations for instance) even further, but they are quite invasive, so I am punting on those unless specifically asked. 
v3: Add newline between variable declaration and logic (Chris) Recommended-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 910fbaff0e99..6cb4467005c0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3119,7 +3119,6 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; - bool mappable, fenceable; size_t gtt_max = map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; @@ -3203,18 +3202,18 @@ search_free: list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&vma->mm_list, &vm->inactive_list); - fenceable = - i915_is_ggtt(vm) && - i915_gem_obj_ggtt_size(obj) == fence_size && - (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0; + if (i915_is_ggtt(vm)) { + bool mappable, fenceable; - mappable = - i915_is_ggtt(vm) && - vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end; + fenceable = + i915_gem_obj_ggtt_size(obj) == fence_size && + (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0; + + mappable = + vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end; - /* Map and fenceable only changes if the VM is the global GGTT */ - if (i915_is_ggtt(vm)) obj->map_and_fenceable = mappable && fenceable; + } WARN_ON(map_and_fenceable && !obj->map_and_fenceable); -- cgit v1.2.3 From 49987099e2cfce4eda5d428e2618fd4e93aba597 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 14 Aug 2013 10:21:23 +0200 Subject: drm/i915: use vma->node directly and rewrap map&fence in bind Use () to make for neater alignment of the split lines, too. With this we ditch another jump through the obj_gtt_size/offset indirection maze. Cc: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6cb4467005c0..fd4872497a72 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3205,12 +3205,11 @@ search_free: if (i915_is_ggtt(vm)) { bool mappable, fenceable; - fenceable = - i915_gem_obj_ggtt_size(obj) == fence_size && - (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0; + fenceable = (vma->node.size == fence_size && + (vma->node.start & (fence_alignment - 1)) == 0); - mappable = - vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end; + mappable = (vma->node.start + obj->base.size <= + dev_priv->gtt.mappable_end); obj->map_and_fenceable = mappable && fenceable; } -- cgit v1.2.3 From 3ef80a818bce56fb4a7ed4465a8fc8372085b9a4 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 13 Aug 2013 18:09:08 -0700 Subject: drm: WARN when removing unallocated node The conditional is usually a recoverable driver bug, and so WARNing, and preventing the drm_mm code from doing potential damage (BUG) is desirable. This issue was hit and fixed twice while developing the i915 multiple address space code. The first fix is the patch just before this, and is hit on an not frequently occuring error path. 
Another was fixed during patch iteration, so it's hard to see from the patch: commit c6cfb325677ea6305fb19acf3a4d14ea267f923e Author: Ben Widawsky Date: Fri Jul 5 14:41:06 2013 -0700 drm/i915: Embed drm_mm_node in i915 gem obj From the intel-gfx mailing list, we discussed this: References: <20130705191235.GA3057@bwidawsk.net> Cc: Dave Airlie CC: Acked-by: Chris Wilson Signed-off-by: Ben Widawsky Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_mm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index fe304f903b13..feb267f37e21 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -346,6 +346,9 @@ void drm_mm_remove_node(struct drm_mm_node *node) struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; + if (WARN_ON(!node->allocated)) + return; + BUG_ON(node->scanned_block || node->scanned_prev_free || node->scanned_next_free); -- cgit v1.2.3 From 4b6d846e9a20ac8c9dd641d0ea875c28f331e241 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 12 Aug 2013 11:46:17 +0100 Subject: drm/i915: Drop the overzealous warning from i915_gem_set_cache_level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By our earlier reckoning, move from a snooped/llc setting to an uncached setting, leaves the CPU cache in a consistent state irrespective of our domain tracking - so we can forgo the warning about the lack of invalidation. Similarly for any writes posted to the snooped CPU domain, we know will be safely clflushed to the uncached PTEs after forcing the domain change. This WARN started to pop up with commit d46f1c3f1372e3a72fab97c60480aa4a1084387f Author: Chris Wilson AuthorDate: Thu Aug 8 14:41:06 2013 +0100 drm/i915: Allow the GPU to cache stolen memory Ville brought up a scenario where the interaction of a set_caching ioctl call from userspace on a scanout buffer (i.e. obj->pin_display is set) resulted in the code getting confused and not properly flushing stale cpu cachelines. Luckily we already prevent this by rejecting caching changes when obj->pin_count is set. Signed-off-by: Chris Wilson Cc: Ville Syrjälä Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=68040 Tested-by: cancan,feng [danvet: Add buglink, bisect result and explain why Ville's scenario is already taken care of.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd4872497a72..41dc04293584 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3440,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * Just set it to the CPU cache for now. */ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); - WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; -- cgit v1.2.3 From 8dc8a27c9733a41cda84e8c70da8313e1d54c4ae Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 2 Aug 2013 16:22:24 -0300 Subject: drm/i915: check the power well when redisabling VGA If the power well is disabled VGA is guaranteed to be disabled. This fixes unclaimed register messages that happen on suspend/resume. v2: Check the actual hw power well state instead of our own tracking to make sure VGA is _really_ off (in case the BIOS/KVMr has just its own request bit set). Requested by Ville. 
Note: Ville suggested whether it wouldn't be better to just enable the power well over a slightly longer time in our resume code, since we already do that. I tend to agree, but there's also the modeset force code in the lid notifier which _also_ eventually calls redisable_vga. We shouldn't ever need this on somewhat modern hw (everything with opregion essentially) but the code to bail out isn't there. Hence stick with this simple approach here for now. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67517 Signed-off-by: Paulo Zanoni [danvet: Summarize the discussion around the resume sequence and lid notifier a bit.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a87bb93a0f63..f83316ec0b94 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10104,6 +10104,17 @@ void i915_redisable_vga(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 vga_reg = i915_vgacntrl_reg(dev); + /* This function can be called both from intel_modeset_setup_hw_state or + * at a very early point in our resume sequence, where the power well + * structures are not yet restored. Since this function is at a very + * paranoid "someone might have enabled VGA while we were not looking" + * level, just check if the power well is enabled instead of trying to + * follow the "don't touch the power well if we don't need it" policy + * the rest of the driver uses. */ + if (HAS_POWER_WELL(dev) && + (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE) == 0) + return; + if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); i915_disable_vga(dev); -- cgit v1.2.3 From 6aedd1f539f51b7b0c3d6be0088c3541f9d2c294 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 2 Aug 2013 16:22:25 -0300 Subject: drm/i915: clarify Haswell power well bit names Whenever I need to work with the HSW_PWER_WELL_* register bits I have to look at the documentation to find out which bit is to request the power well and which one shows its current state. Rename the bits so I won't need to look the docs every time. 
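With the new names, the distinction the rename is after is simply: bit 31 is what the driver (or BIOS/KVMr) asks for, bit 30 is what the hardware reports. A small self-contained sketch of reading the two bits; the bit positions come from the hunk above, while the helper and the sample register values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define HSW_PWR_WELL_ENABLE_REQUEST (1u << 31)  /* driver/BIOS/KVMr request */
#define HSW_PWR_WELL_STATE_ENABLED  (1u << 30)  /* current hardware state   */

static void describe(uint32_t ctl)
{
        int requested = !!(ctl & HSW_PWR_WELL_ENABLE_REQUEST);
        int enabled   = !!(ctl & HSW_PWR_WELL_STATE_ENABLED);

        printf("request=%d state=%d: %s\n", requested, enabled,
               requested && !enabled ? "powering up, poll the state bit" :
               enabled ? "power well is up" : "power well is down");
}

int main(void)
{
        describe(0);
        describe(HSW_PWR_WELL_ENABLE_REQUEST);
        describe(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
        return 0;
}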
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 13 +++++++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fab94be89dfa..3c652eb7ee6c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4825,8 +4825,8 @@ #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ -#define HSW_PWR_WELL_ENABLE (1<<31) -#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) +#define HSW_PWR_WELL_STATE_ENABLED (1<<30) #define HSW_PWR_WELL_CTL5 0x45410 #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f83316ec0b94..e600e1cfb6ea 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10112,7 +10112,7 @@ void i915_redisable_vga(struct drm_device *dev) * follow the "don't touch the power well if we don't need it" policy * the rest of the driver uses. */ if (HAS_POWER_WELL(dev) && - (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE) == 0) + (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) return; if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 76150818b5cb..0d90064775c9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5285,7 +5285,7 @@ bool intel_display_power_enabled(struct drm_device *dev, case POWER_DOMAIN_TRANSCODER_B: case POWER_DOMAIN_TRANSCODER_C: return I915_READ(HSW_PWR_WELL_DRIVER) == - (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); + (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); default: BUG(); } @@ -5298,17 +5298,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) uint32_t tmp; tmp = I915_READ(HSW_PWR_WELL_DRIVER); - is_enabled = tmp & HSW_PWR_WELL_STATE; - enable_requested = tmp & HSW_PWR_WELL_ENABLE; + is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; + enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; if (enable) { if (!enable_requested) - I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); + I915_WRITE(HSW_PWR_WELL_DRIVER, + HSW_PWR_WELL_ENABLE_REQUEST); if (!is_enabled) { DRM_DEBUG_KMS("Enabling power well\n"); if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & - HSW_PWR_WELL_STATE), 20)) + HSW_PWR_WELL_STATE_ENABLED), 20)) DRM_ERROR("Timeout enabling power well\n"); } } else { @@ -5410,7 +5411,7 @@ void intel_init_power_well(struct drm_device *dev) /* We're taking over the BIOS, so clear any requests made by it since * the driver is in charge now. */ - if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) I915_WRITE(HSW_PWR_WELL_BIOS, 0); } -- cgit v1.2.3 From fd547d25a8ac3f390fee4a689de86a64e3d65fe1 Mon Sep 17 00:00:00 2001 From: Vinit Azad Date: Wed, 14 Aug 2013 13:34:33 -0700 Subject: drm/i915: Only unmask required PM interrupts Un-masking all PM interrupts causes hardware to generate interrupts regardless of whether the interrupts are enabled on the DE side. Since turbo only need up/down threshold and rc6 timeout interrupt, mask all other interrupts bits to avoid unnecessary overhead/wake up. 
Note that our interrupt handler isn't being fired since we do set the IER bits properly (IIR bits aren't set). The overhead isn't because our driver is reacting to these interrupts, but because hardware keeps generating internal messages when PMINTRMSK doesn't mask out the up/down EI interrupts (which happen periodically). Change-Id: I6c947df6fd5f60584d39b9e8b8c89faa51a5e827 Signed-off-by: Vinit Azad [danvet: Add follow-up explanation of the precise effects from Vinit as a note to the commit message.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0d90064775c9..7bc3f1783174 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3453,8 +3453,8 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); - /* unmask all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); + /* only unmask PM interrupts we need. Mask all others. */ + I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); } static void gen6_enable_rps(struct drm_device *dev) -- cgit v1.2.3 From f214266c0d147c0a2608caafc43c832f1738f0a9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 09:10:37 +0200 Subject: drm/i915: unpin backing storage in dmabuf_unmap This fixes a WARN in i915_gem_free_object when the obj->pages_pin_count isn't 0. v2: Add locking to unmap, noticed by Chris Wilson. Note that even though we call unmap with our own dev->struct_mutex held that won't result in an immediate deadlock since we never go through the dma_buf interfaces for our own, reimported buffers. But it's still easy to blow up and anger lockdep, but that's already the case with our ->map implementation. Fixing this for real will involve per dma-buf ww mutex locking by the callers. And lots of fun. So go with the duct-tape approach for now. Cc: Chris Wilson Reported-by: Maarten Lankhorst Cc: Maarten Lankhorst Tested-by: Armin K. (v1) Acked-by: Maarten Lankhorst Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..9e6578330801 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { + struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + + mutex_lock(&obj->base.dev->struct_mutex); + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); sg_free_table(sg); kfree(sg); + + i915_gem_object_unpin_pages(obj); + + mutex_unlock(&obj->base.dev->struct_mutex); } static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) -- cgit v1.2.3 From 608806a549c656c925eeb253cbed768535f26e41 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Aug 2013 09:10:38 +0200 Subject: drm/i915: explicit store base gem object in dma_buf->priv Makes it more obviously correct what tricks we play by reusing the drm prime release helper. 
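The trick being made explicit is that dma_buf->priv now holds a pointer to the embedded base GEM object, and the wrapper object is recovered with a container_of-style cast. A standalone illustration of that pointer bookkeeping, using invented structure names rather than the driver's types and a local container_of() equivalent in effect to the kernel's:

#include <stddef.h>
#include <stdio.h>

struct base_object { size_t size; };           /* plays drm_gem_object      */
struct wrapper_object {                        /* plays drm_i915_gem_object */
        struct base_object base;
        int cache_level;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Analogue of dma_buf_to_obj(): priv stores only the base pointer. */
static struct wrapper_object *priv_to_wrapper(void *priv)
{
        return container_of((struct base_object *)priv,
                            struct wrapper_object, base);
}

int main(void)
{
        struct wrapper_object obj = { .base = { .size = 4096 }, .cache_level = 1 };
        void *priv = &obj.base;        /* what the export path would store */

        printf("size=%zu cache_level=%d\n",
               priv_to_wrapper(priv)->base.size,
               priv_to_wrapper(priv)->cache_level);
        return 0;
}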
Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 9e6578330801..938eb341054c 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -27,10 +27,15 @@ #include "i915_drv.h" #include +static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) +{ + return to_intel_bo(buf->priv); +} + static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction dir) { - struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); struct sg_table *st; struct scatterlist *src, *dst; int ret, i; @@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { - struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); mutex_lock(&obj->base.dev->struct_mutex); @@ -111,7 +116,7 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; struct sg_page_iter sg_iter; struct page **pages; @@ -159,7 +164,7 @@ error: static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; int ret; @@ -202,7 +207,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; int ret; bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); @@ -233,9 +238,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = { struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags) { - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags); + return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) @@ -272,7 +275,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, /* is this one of own objects? */ if (dma_buf->ops == &i915_dmabuf_ops) { - obj = dma_buf->priv; + obj = dma_buf_to_obj(dma_buf); /* is it from our device? */ if (obj->base.dev == dev) { /* -- cgit v1.2.3 From 79f8dea13391f8220470997f9a5213ab5aa9f1c7 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 14 Aug 2013 14:40:37 -0300 Subject: drm/i915: enable the power well before module unload Our driver initialization doesn't seem to be ready to load when the power well is disabled: we hit a few "Unclaimed register" messages. So do just like we already do for the suspend/resume path: enable the power well before unloading. 
At some point we'll want to be able to survive suspend/resume and load/unload with the power well disabled, but for now let's just fix the regression. Regression introduced by the following commit: commit bf51d5e2cda5d36d98e4b46ac7fca9461e512c41 Author: Paulo Zanoni Date: Wed Jul 3 17:12:13 2013 -0300 drm/i915: switch disable_power_well default value to 1 Bug can be reproduced by running the "module_reload" script from intel-gpu-tools. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67813 Signed-off-by: Paulo Zanoni Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d4c176b7d76e..5a051eaab9ef 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1688,8 +1688,13 @@ int i915_driver_unload(struct drm_device *dev) intel_gpu_ips_teardown(); - if (HAS_POWER_WELL(dev)) + if (HAS_POWER_WELL(dev)) { + /* The i915.ko module is still not prepared to be loaded when + * the power well is not enabled, so just enable it in case + * we're going to unload/reload. */ + intel_set_power_well(dev, true); i915_remove_power_well(dev); + } i915_teardown_sysfs(dev); -- cgit v1.2.3 From 99486b8e6140da7721c932e708a6c17dc1dd970a Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Tue, 13 Aug 2013 16:23:17 -0700 Subject: i915: Add a Kconfig option to turn on i915.preliminary_hw_support by default When building kernels for a preliminary hardware target, having to add a kernel command-line option can prove inconvenient. Add a Kconfig option that changes the default of this option to 1. Signed-off-by: Josh Triplett Reviewed-by: Damien Lespiau [danvet: Pimp the Kconfig help text a bit as suggested by Damien in his 2nd review.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/Kconfig | 11 +++++++++++ drivers/gpu/drm/i915/i915_drv.c | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a7c54c843291..cd4246b480d4 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -168,6 +168,17 @@ config DRM_I915_KMS the driver to bind to PCI devices, which precludes loading things like intelfb. +config DRM_I915_PRELIMINARY_HW_SUPPORT + bool "Enable preliminary support for prerelease Intel hardware by default" + depends on DRM_I915 + help + Choose this option if you have prerelease Intel hardware and want the + i915 driver to support it by default. You can enable such support at + runtime with the module option i915.preliminary_hw_support=1; this + option changes the default for that module option. + + If in doubt, say "N". + config DRM_MGA tristate "Matrox g200/g400" depends on DRM && PCI diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 01d63a0435fb..fd9fb2c25691 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -122,10 +122,10 @@ int i915_enable_psr __read_mostly = 0; module_param_named(enable_psr, i915_enable_psr, int, 0600); MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); -unsigned int i915_preliminary_hw_support __read_mostly = 0; +unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT); module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); MODULE_PARM_DESC(preliminary_hw_support, - "Enable preliminary hardware support. 
(default: false)"); + "Enable preliminary hardware support."); int i915_disable_power_well __read_mostly = 1; module_param_named(disable_power_well, i915_disable_power_well, int, 0600); -- cgit v1.2.3 From b25cb2f8828aca6204d9c93d4d677f27e3ae9fa6 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 14 Aug 2013 11:38:33 +0200 Subject: drm/i915: s/obj->exec_list/obj->obj_exec_link in debugfs To convert the execbuf code over to use vmas natively we need to shuffle the exec_list a bit. This patch here just prepares things with the debugfs code, which also uses the old exec_list list_head, newly called obj_exec_link. Signed-off-by: Ben Widawsky [danvet: Split out from Ben's big patch.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 12 ++++++------ drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 1 + 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index eb87865c20d4..4785d8c14654 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -195,9 +195,9 @@ static int obj_rank_by_stolen(void *priv, struct list_head *A, struct list_head *B) { struct drm_i915_gem_object *a = - container_of(A, struct drm_i915_gem_object, exec_list); + container_of(A, struct drm_i915_gem_object, obj_exec_link); struct drm_i915_gem_object *b = - container_of(B, struct drm_i915_gem_object, exec_list); + container_of(B, struct drm_i915_gem_object, obj_exec_link); return a->stolen->start - b->stolen->start; } @@ -221,7 +221,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) if (obj->stolen == NULL) continue; - list_add(&obj->exec_list, &stolen); + list_add(&obj->obj_exec_link, &stolen); total_obj_size += obj->base.size; total_gtt_size += i915_gem_obj_ggtt_size(obj); @@ -231,7 +231,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) if (obj->stolen == NULL) continue; - list_add(&obj->exec_list, &stolen); + list_add(&obj->obj_exec_link, &stolen); total_obj_size += obj->base.size; count++; @@ -239,11 +239,11 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) list_sort(NULL, &stolen, obj_rank_by_stolen); seq_puts(m, "Stolen:\n"); while (!list_empty(&stolen)) { - obj = list_first_entry(&stolen, typeof(*obj), exec_list); + obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); seq_puts(m, " "); describe_obj(m, obj); seq_putc(m, '\n'); - list_del_init(&obj->exec_list); + list_del_init(&obj->obj_exec_link); } mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2e7d5f9524f7..6532d9713b72 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1312,6 +1312,8 @@ struct drm_i915_gem_object { struct list_head global_list; struct list_head ring_list; + /** Used in execbuf to temporarily hold a ref */ + struct list_head obj_exec_link; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 41dc04293584..ce40e27f8b42 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3992,6 +3992,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); + INIT_LIST_HEAD(&obj->obj_exec_link); INIT_LIST_HEAD(&obj->vma_list); obj->ops = ops; -- cgit v1.2.3 
From 82a55ad1a0585e4e01a47f72fe81fb5a2d2c0fb1 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 14 Aug 2013 11:38:34 +0200 Subject: drm/i915: Switch eviction code to use vmas The execbuf wants to do relocations usings vmas, so we need a vma->exec_list. The eviction code also uses the old obj execbuf list for it's own book-keeping, but would really prefer to deal in vmas only. So switch it over to the new list. Again this is just a prep patch for the big execbuf vma conversion. Signed-off-by: Ben Widawsky [danvet: Split out from Ben's big execbuf vma patch.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_gem_evict.c | 31 ++++++++++++++----------------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6532d9713b72..b2b9836c92e1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -563,6 +563,10 @@ struct i915_vma { struct list_head mm_list; struct list_head vma_link; /* Link in the object's VMA list */ + + /** This vma's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; + }; struct i915_ctx_hang_stats { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ce40e27f8b42..ca29055ae206 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4132,6 +4132,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&vma->vma_link); INIT_LIST_HEAD(&vma->mm_list); + INIT_LIST_HEAD(&vma->exec_list); vma->vm = vm; vma->obj = obj; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 425939b7d343..87875884770c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -37,7 +37,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) if (vma->obj->pin_count) return false; - list_add(&vma->obj->exec_list, unwind); + list_add(&vma->exec_list, unwind); return drm_mm_scan_add_block(&vma->node); } @@ -49,7 +49,6 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct i915_vma *vma; - struct drm_i915_gem_object *obj; int ret = 0; trace_i915_gem_evict(dev, min_size, alignment, mappable); @@ -104,14 +103,13 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, none: /* Nothing found, clean up and bail out! */ while (!list_empty(&unwind_list)) { - obj = list_first_entry(&unwind_list, - struct drm_i915_gem_object, + vma = list_first_entry(&unwind_list, + struct i915_vma, exec_list); - vma = i915_gem_obj_to_vma(obj, vm); ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); - list_del_init(&obj->exec_list); + list_del_init(&vma->exec_list); } /* We expect the caller to unpin, evict all and try again, or give up. @@ -125,28 +123,27 @@ found: * temporary list. 
*/ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { - obj = list_first_entry(&unwind_list, - struct drm_i915_gem_object, + vma = list_first_entry(&unwind_list, + struct i915_vma, exec_list); - vma = i915_gem_obj_to_vma(obj, vm); if (drm_mm_scan_remove_block(&vma->node)) { - list_move(&obj->exec_list, &eviction_list); - drm_gem_object_reference(&obj->base); + list_move(&vma->exec_list, &eviction_list); + drm_gem_object_reference(&vma->obj->base); continue; } - list_del_init(&obj->exec_list); + list_del_init(&vma->exec_list); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - obj = list_first_entry(&eviction_list, - struct drm_i915_gem_object, + vma = list_first_entry(&eviction_list, + struct i915_vma, exec_list); if (ret == 0) - ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); + ret = i915_vma_unbind(vma); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + list_del_init(&vma->exec_list); + drm_gem_object_unreference(&vma->obj->base); } return ret; -- cgit v1.2.3 From accfef2e5a8f713bfa0c06696b5e10754686dc72 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 14 Aug 2013 11:38:35 +0200 Subject: drm/i915: prepare bind_to_vm for preallocated vma In the new execbuf code we want to track buffers using the vmas even before they're all properly mapped. Which means that bind_to_vm needs to deal with buffers which have preallocated vmas which aren't yet bound. This patch implements this prep work and adjusts our WARN/BUG checks. Signed-off-by: Ben Widawsky [danvet: Split out from Ben's big execbuf patch. Also move one BUG back to its original place to deflate the diff a notch.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 23 +++++++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b2b9836c92e1..4bd66e9a0450 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1914,6 +1914,9 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct i915_address_space *vm); struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); /* Some GGTT VM helpers */ #define obj_to_ggtt(obj) \ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ca29055ae206..449575b85b31 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3124,9 +3124,6 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - if (WARN_ON(!list_empty(&obj->vma_list))) - return -EBUSY; - fence_size = i915_gem_get_gtt_size(dev, obj->base.size, obj->tiling_mode); @@ -3165,16 +3162,17 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - /* FIXME: For now we only ever use 1 VMA per object */ BUG_ON(!i915_is_ggtt(vm)); - WARN_ON(!list_empty(&obj->vma_list)); - vma = i915_gem_vma_create(obj, vm); + vma = i915_gem_obj_lookup_or_create_vma(obj, vm); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_unpin; } + /* For now we only ever use 1 vma per object */ + WARN_ON(!list_is_singular(&obj->vma_list)); + search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, 
@@ -4882,3 +4880,16 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, return NULL; } + +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = i915_gem_vma_create(obj, vm); + + return vma; +} -- cgit v1.2.3 From 8637b407cf1740c52a01b9fc0cf506f31e225151 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 16 Aug 2013 13:29:33 -0700 Subject: drm/i915/vma: Correct use after free in eviction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The vma will [possibly] be destroyed during unbind in eviction. Immediately after this, we try to delete the list entry. Chris and Ville did the debug on this before I woke up, I just get to take credit for the fix :p For future reference the Oops that Mika reported: [ 403.472448] BUG: unable to handle kernel paging request at 6b6b6b6b [ 403.472473] IP: [] __list_del_entry+0x20/0xe0 [ 403.472514] *pdpt = 000000002e89c001 *pde = 0000000000000000 [ 403.472556] Oops: 0000 [#1] SMP [ 403.472582] Modules linked in: mxm_wmi snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_intel snd_hda_codec snd_hwdep snd_pcm snd_seq_midi snd_rawmidi psmouse snd_seq_midi_event snd_seq serio_raw snd_timer snd_seq_device snd soundcore snd_page_alloc wmi bnep rfcomm bluetooth mac_hid parport_pc ppdev lp parport usbhid dm_crypt firewire_ohci firewire_core crc_itu_t i915 drm_kms_helper e1000e ptp drm i2c_algo_bit pps_core xhci_hcd video [ 403.472895] CPU: 2 PID: 1940 Comm: Xorg Not tainted 3.11.0-rc2+ #827 [ 403.472938] Hardware name: /DZ77BH-55K, BIOS BHZ7710H.86A.0070.2012.0416.2117 04/16/2012 [ 403.473002] task: ec866c00 ti: ee6a2000 task.ti: ee6a2000 [ 403.473039] EIP: 0060:[] EFLAGS: 00013202 CPU: 2 [ 403.473078] EIP is at __list_del_entry+0x20/0xe0 [ 403.473109] EAX: f016d9bc EBX: f016d9bc ECX: 6b6b6b6b EDX: 6b6b6b6b [ 403.473151] ESI: 00000000 EDI: ee6a3c90 EBP: ee6a3c60 ESP: ee6a3c48 [ 403.473193] DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 [ 403.473230] CR0: 80050033 CR2: 6b6b6b6b CR3: 2ec43000 CR4: 001407f0 [ 403.473271] Stack: [ 403.473285] f63b2ff0 f61f98c0 f61f8000 f016d9bc 00000000 f016d9bc ee6a3cac f8519a4a [ 403.473347] 00000000 00000000 10000000 f61f8000 0100a000 10000000 00000001 008ca000 [ 403.473410] f64ee840 f61f98c0 f016d9bc f016dcec ee6a3c98 ee6a3c98 f61f98c0 dcc58f00 [ 403.473472] Call Trace: [ 403.473509] [] i915_gem_evict_something+0x17a/0x2d0 [i915] [ 403.473567] [] i915_gem_object_pin+0x271/0x660 [i915] [ 403.473622] [] ? i915_ggtt_clear_range+0x20/0x20 [i915] [ 403.473676] [] i915_gem_object_pin_to_display_plane+0xda/0x190 [i915] [ 403.473742] [] intel_pin_and_fence_fb_obj+0xba/0x140 [i915] [ 403.473800] [] intel_gen7_queue_flip+0x30/0x1c0 [i915] [ 403.473856] [] intel_crtc_page_flip+0x1a0/0x320 [i915] [ 403.473911] [] ? drm_framebuffer_reference+0x39/0x80 [drm] [ 403.473965] [] drm_mode_page_flip_ioctl+0x28b/0x320 [drm] [ 403.474018] [] drm_ioctl+0x4b8/0x560 [drm] [ 403.474064] [] ? drm_mode_gamma_get_ioctl+0xd0/0xd0 [drm] [ 403.474113] [] ? do_sync_read+0x6a/0xa0 [ 403.474154] [] ? drm_copy_field+0x80/0x80 [drm] [ 403.474193] [] do_vfs_ioctl+0x7c/0x5b0 [ 403.474228] [] ? vfs_read+0xef/0x160 [ 403.474263] [] ? 
ktime_get_ts+0x4b/0x120 [ 403.474298] [] SyS_ioctl+0x97/0xa0 [ 403.474330] [] sysenter_do_call+0x12/0x22 [ 403.474364] Code: 55 f4 8b 45 f8 e9 75 ff ff ff 90 55 89 e5 53 83 ec 14 8b 08 8b 50 04 81 f9 00 01 10 00 74 24 81 fa 00 02 20 00 0f 84 8e 00 00 00 <8b> 1a 39 d8 75 62 8b 59 04 39 d8 75 35 89 51 04 89 0a 83 c4 14 [ 403.474566] EIP: [] __list_del_entry+0x20/0xe0 SS:ESP 0068:ee6a3c48 [ 403.476513] CR2: 000000006b6b6b6b v2: Missed the drm_object_unreference use after free (Ville) Daniel Vetter writes: Reported-by: Mika Kuoppala Cc: Ville Syrjälä Cc: Chris Wilson Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson [danvet: Add the Oops from Mika to the commit message.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_evict.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 87875884770c..91b700155850 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -136,14 +136,17 @@ found: /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { + struct drm_gem_object *obj; vma = list_first_entry(&eviction_list, struct i915_vma, exec_list); + + obj = &vma->obj->base; + list_del_init(&vma->exec_list); if (ret == 0) ret = i915_vma_unbind(vma); - list_del_init(&vma->exec_list); - drm_gem_object_unreference(&vma->obj->base); + drm_gem_object_unreference(obj); } return ret; -- cgit v1.2.3 From 139ccd3fb12b3d17a773d2d61140f955a47fa470 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 19 Aug 2013 11:04:55 -0700 Subject: drm/i915: make IVB FDI training match spec v3 The existing code was trying different vswing and preemphasis settings in the wrong place, and wasn't trying them enough. So add a loop to walk through them, properly disabling FDI TX and RX in between if a failure is detected. 
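The shape of the new loop is: walk the vswing/pre-emphasis table, give each entry two consecutive attempts (hence indexing with j/2), and tear the link down between attempts before retrying or advancing. Reduced to plain C with placeholder table values and a made-up success condition:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder values; the real vswing/pre-emphasis table lives in the driver. */
static const unsigned int train_param[] = { 0x0, 0x1, 0x2, 0x3 };

/* Pretend training only succeeds on the third setting, second attempt. */
static int try_training(unsigned int setting, unsigned int attempt)
{
        return setting == 2 && attempt == 1;
}

int main(void)
{
        unsigned int j;

        for (j = 0; j < ARRAY_SIZE(train_param) * 2; j++) {
                /* j/2 selects the table entry, so each one is tried twice. */
                printf("trying param[%u]=0x%x, attempt %u\n",
                       j / 2, train_param[j / 2], j % 2 + 1);
                if (try_training(j / 2, j % 2)) {
                        printf("link trained\n");
                        return 0;
                }
                /* on failure: disable TX/RX (omitted) and let the loop retry */
        }
        printf("training failed on every setting\n");
        return 1;
}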
v2: remove unneeded reg writes, add delays around bit lock checks (Jesse) v3: fix TX and RX disable per spec (Paulo) fix delays per spec (Paulo) make RX symbol lock check match TX bit lock check (Paulo) Signed-off-by: Jesse Barnes Reviewed-by: Paulo Zanoni Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51983 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 142 ++++++++++++++++++----------------- 1 file changed, 72 insertions(+), 70 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e600e1cfb6ea..7a40427823c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2597,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i; + u32 reg, temp, i, j; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -2613,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", I915_READ(FDI_RX_IIR(pipe))); - /* enable CPU FDI TX and PCH FDI RX */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_DP_PORT_WIDTH_MASK; - temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); - temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); - temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_TX_ENABLE); - - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_AUTO; - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; - temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_RX_ENABLE); + /* Try each vswing and preemphasis setting twice before moving on */ + for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { + /* disable first in case we need to retry */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); + temp &= ~FDI_TX_ENABLE; + I915_WRITE(reg, temp); - POSTING_READ(reg); - udelay(150); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_AUTO; + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp &= ~FDI_RX_ENABLE; + I915_WRITE(reg, temp); - for (i = 0; i < 4; i++) { + /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); + temp &= ~FDI_DP_PORT_WIDTH_MASK; + temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); + temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - I915_WRITE(reg, temp); + temp |= snb_b_fdi_train_param[j/2]; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | FDI_TX_ENABLE); - POSTING_READ(reg); - udelay(500); + I915_WRITE(FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_IIR(pipe); + reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if (temp & FDI_RX_BIT_LOCK || - (I915_READ(reg) & FDI_RX_BIT_LOCK)) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); - break; - } - } - if (i == 4) - DRM_ERROR("FDI train 1 fail!\n"); + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | 
FDI_RX_ENABLE); - /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE_IVB; - temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - I915_WRITE(reg, temp); + POSTING_READ(reg); + udelay(1); /* should be 0.5us */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; - I915_WRITE(reg, temp); + for (i = 0; i < 4; i++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - POSTING_READ(reg); - udelay(150); + if (temp & FDI_RX_BIT_LOCK || + (I915_READ(reg) & FDI_RX_BIT_LOCK)) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", + i); + break; + } + udelay(1); /* should be 0.5us */ + } + if (i == 4) { + DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); + continue; + } - for (i = 0; i < 4; i++) { + /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; + temp &= ~FDI_LINK_TRAIN_NONE_IVB; + temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; I915_WRITE(reg, temp); POSTING_READ(reg); - udelay(500); + udelay(2); /* should be 1.5us */ - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + for (i = 0; i < 4; i++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); - break; + if (temp & FDI_RX_SYMBOL_LOCK || + (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", + i); + goto train_done; + } + udelay(2); /* should be 1.5us */ } + if (i == 4) + DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); } - if (i == 4) - DRM_ERROR("FDI train 2 fail!\n"); +train_done: DRM_DEBUG_KMS("FDI train done.\n"); } -- cgit v1.2.3 From 8254860096df085d633207d4d68550bb2ca29f17 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 19 Aug 2013 19:32:00 +0100 Subject: drm/i915: Remove DSPARB_HWCONTROL() This define hasn't been used since: commit 652c393a3368af84359da37c45afc35a91144960 Author: Jesse Barnes Date: Mon Aug 17 13:31:43 2009 -0700 drm/i915: add dynamic clock frequency control Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4bd66e9a0450..15aede2eb458 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1562,8 +1562,6 @@ struct drm_i915_file_private { #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) -/* dsparb controlled by hw only */ -#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -- cgit v1.2.3 From fdaa930bee14abe5ed1d1aead5bc6a9a5660ccbf Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 19 Aug 2013 19:32:01 +0100 Subject: drm/i915: Remove HAS_PIPE_CONTROL() 
The code using this was removed in: commit 88f23b8fa3e6357c423af24ec31c661fc12f884b Author: Chris Wilson Date: Sun Dec 5 15:08:31 2010 +0000 drm/i915: Avoid using PIPE_CONTROL on Ironlake Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 15aede2eb458..29cccf3f0918 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1569,8 +1569,6 @@ struct drm_i915_file_private { #define HAS_IPS(dev) (IS_ULT(dev)) -#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) - #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) #define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) -- cgit v1.2.3 From 3abdb33410d8b130437613a2fe3d5bf667ca34da Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 19 Aug 2013 19:32:02 +0100 Subject: drm: Remove IS_IRONLAKE_D() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This define hasn't been used since: commit cfdf1fa23f4074c9f8766dc67a928bbf680b1ac9 Author: Kristian Høgsberg Date: Wed Dec 16 15:16:16 2009 -0500 drm/i915: Implement IS_* macros using static tables Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 29cccf3f0918..9bf28010c231 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1505,7 +1505,6 @@ struct drm_i915_file_private { #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ -- cgit v1.2.3 From e3ce7633ba38a97c2203ab60f381ce1642940328 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Mon, 19 Aug 2013 19:32:03 +0100 Subject: drm/i915: Remove I915_READ_{NOPID, SYNC_0, SYNC_1})() The code directly uses the registers and ring->mmio_base. Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5e6be842d225..432ad5311ba6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -33,10 +33,6 @@ struct intel_hw_status_page { #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) -#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) -#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) -#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) - enum intel_ring_hangcheck_action { HANGCHECK_WAIT, HANGCHECK_ACTIVE, -- cgit v1.2.3 From ec013e7f491cceef0e87190a3c6b132ce49f7ce4 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 20 Aug 2013 10:29:23 +0100 Subject: drm/i915: Expose energy counter on SNB+ through debugfs On SNB and IVB, there's an MSR (also exposed through MCHBAR) we can use to read out the amount of energy used over time. 
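The arithmetic the debugfs hook below relies on: the power-unit MSR carries an "energy status unit" exponent in bits 12:8, each counter tick is 2^-unit joules, and multiplying the running counter by the per-tick value in microjoules gives the total. A standalone sketch of that conversion; both raw values here are examples, not read from real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t unit_msr   = 0x0a1003;  /* energy-status-unit field = 0x10 */
        uint64_t energy_raw = 123456;    /* running energy counter          */

        uint64_t esu = (unit_msr & 0x1f00) >> 8;        /* exponent: 2^-esu J */
        uint32_t uj_per_tick = 1000000 / (1u << esu);   /* ~15 uJ when esu=16 */

        printf("1 tick = %u uJ, total = %llu uJ\n", uj_per_tick,
               (unsigned long long)(energy_raw * uj_per_tick));
        return 0;
}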
Expose this in sysfs to make it easy to do power comparisons with different configurations. If the platform supports it, the file will show up under the drm/card0/power subdirectory of the PCI device in sysfs as gt_energy_uJ. The value in the file is a running total of energy (in microjoules) consumed by the graphics device. v2: move to sysfs (Ben, Daniel) expose a simple value (Chris) drop unrelated hunk (Ben) Signed-off-by: Jesse Barnes v3: by Ben Tied it into existing rc6 sysfs entries and named that a more generic "power attrs." Fixed rebase conflicts. Signed-off-by: Ben Widawsky v4: Since RAPL is a real driver that already exists to serve power monitoring, place our entry in debugfs. This gives me a fallback location for systems that do not expose it otherwise. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 2 ++ 2 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4785d8c14654..236d97e51c3a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "intel_drv.h" #include "intel_ringbuffer.h" @@ -1769,6 +1770,27 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) return 0; } +static int i915_energy_uJ(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u64 power; + u32 units; + + if (INTEL_INFO(dev)->gen < 6) + return -ENODEV; + + rdmsrl(MSR_RAPL_POWER_UNIT, power); + power = (power & 0x1f00) >> 8; + units = 1000000 / (1 << power); /* convert to uJ */ + power = I915_READ(MCH_SECP_NRG_STTS); + power *= units; + + seq_printf(m, "%llu", (long long unsigned)power); + return 0; +} + static int i915_wedged_get(void *data, u64 *val) { @@ -2208,6 +2230,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_dpio", i915_dpio_info, 0}, {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, + {"i915_energy_uJ", i915_energy_uJ, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3c652eb7ee6c..c4c509895826 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1447,6 +1447,8 @@ #define MCH_SSKPD_WM0_MASK 0x3f #define MCH_SSKPD_WM0_VAL 0xc +#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) + /* Clocking configuration register */ #define CLKCFG 0x10c00 #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ -- cgit v1.2.3 From a40066412cc2ace1c1299e7a4d7a81dc33395b6f Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:11 -0300 Subject: drm/i915: add the FCLK case to intel_ddi_get_cdclk_freq We already have code to disable LCPLL and switch to FCLK, so we need this too. We still don't call the code to disable LCPLL, but we'll call it when we add support for Package C8+. 
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b8c096b4a1de..63aca49d11a8 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1139,10 +1139,13 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) { - if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) + uint32_t lcpll = I915_READ(LCPLL_CTL); + + if (lcpll & LCPLL_CD_SOURCE_FCLK) + return 800000; + else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) return 450000; - else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == - LCPLL_CLK_FREQ_450) + else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) return 450000; else if (IS_ULT(dev_priv->dev)) return 337500; -- cgit v1.2.3 From 43eaea131823c5ca13d03364e61bd15f0b22a0f7 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:12 -0300 Subject: drm/i915: wrap GTIMR changes Just like the functions that touch DEIMR and SDEIMR, but for GTIMR. The new functions contain a POSTING_READ(GTIMR) which was not present at the 2 callers inside i915_irq.c. The implementation is based on ibx_display_interrupt_update. Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 34 +++++++++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_drv.h | 3 +++ drivers/gpu/drm/i915/intel_ringbuffer.c | 22 ++++++--------------- 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 28d57477aa42..6bd4508666d2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -104,6 +104,34 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) } } +/** + * ilk_update_gt_irq - update GTIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + assert_spin_locked(&dev_priv->irq_lock); + + dev_priv->gt_irq_mask &= ~interrupt_mask; + dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); +} + +void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, mask); +} + +void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, 0); +} + static bool ivb_can_enable_err_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -806,8 +834,7 @@ static void ivybridge_parity_work(struct work_struct *work) I915_WRITE(GEN7_MISCCPCTL, misccpctl); spin_lock_irqsave(&dev_priv->irq_lock, flags); - dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -837,8 +864,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev) return; spin_lock(&dev_priv->irq_lock); - dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIMR, 
dev_priv->gt_irq_mask); + ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 01455aa8b8bb..a8462064714c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -778,5 +778,8 @@ extern void intel_edp_psr_update(struct drm_device *dev); extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv, bool switch_to_fclk, bool allow_power_down); extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); +extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, + uint32_t mask); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 34777168f700..2e370804248f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (ring->irq_refcount++ == 0) + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; @@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (--ring->irq_refcount == 0) + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1028,9 +1022,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); else I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1051,9 +1043,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); else I915_WRITE_IMR(ring, ~0); - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); -- cgit v1.2.3 From edbfdb456053d0738e6b06a3827ead4158bfc918 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:13 -0300 Subject: drm/i915: wrap GEN6_PMIMR changes Just like we're doing with the other IMR changes. One of the functional changes is that not every caller was doing the POSTING_READ. 
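The interrupt_mask/enabled_irq_mask convention shared by ilk_update_gt_irq() above and snb_update_pm_irq() below is easy to read backwards: interrupt_mask selects which bits are being touched, enabled_irq_mask says which of those should end up unmasked, and because IMR-style registers are mask registers, a set bit means the interrupt is disabled. A small self-contained sketch of just that bit manipulation (no hardware access, names are illustrative):

#include <assert.h>
#include <stdint.h>

/* 'imr' is a software shadow of a mask register where 1 == masked off. */
static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
        imr &= ~interrupt_mask;                    /* clear every bit we touch */
        imr |= ~enabled_irq_mask & interrupt_mask; /* re-set those staying masked */
        return imr;
}

int main(void)
{
        uint32_t imr = 0xffffffff;              /* everything masked */

        imr = update_imr(imr, 0x3, 0x3);        /* "enable": unmask bits 0-1 */
        assert(imr == 0xfffffffc);

        imr = update_imr(imr, 0x1, 0x0);        /* "disable": mask bit 0 again */
        assert(imr == 0xfffffffd);
        return 0;
}

The enable/disable wrappers in these patches are then just update(mask, mask) and update(mask, 0) built on top of this.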
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 47 ++++++++++++++++++++++++++++----- drivers/gpu/drm/i915/intel_drv.h | 3 +++ drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++---- 4 files changed, 46 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6bd4508666d2..af5c335a69db 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -132,6 +132,41 @@ void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) ilk_update_gt_irq(dev_priv, mask, 0); } +/** + * snb_update_pm_irq - update GEN6_PMIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void snb_update_pm_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t pmimr = I915_READ(GEN6_PMIMR); + pmimr &= ~interrupt_mask; + pmimr |= (~enabled_irq_mask & interrupt_mask); + + assert_spin_locked(&dev_priv->irq_lock); + + I915_WRITE(GEN6_PMIMR, pmimr); + POSTING_READ(GEN6_PMIMR); +} + +void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + snb_update_pm_irq(dev_priv, mask, mask); +} + +void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + snb_update_pm_irq(dev_priv, mask, 0); +} + +static void snb_set_pm_irq(struct drm_i915_private *dev_priv, uint32_t val) +{ + snb_update_pm_irq(dev_priv, 0xffffffff, ~val); +} + static bool ivb_can_enable_err_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -739,15 +774,14 @@ static void gen6_pm_rps_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, rps.work); - u32 pm_iir, pm_imr; + u32 pm_iir; u8 new_delay; spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - pm_imr = I915_READ(GEN6_PMIMR); /* Make sure not to corrupt PMIMR state used by ringbuffer code */ - I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); + snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) @@ -921,8 +955,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); - POSTING_READ(GEN6_PMIMR); + snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1005,8 +1038,8 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, if (pm_iir & GEN6_PM_RPS_EVENTS) { spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); - /* never want to mask useful interrupts. (also posting read) */ + snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir); + /* never want to mask useful interrupts. 
*/ WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); spin_unlock(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a8462064714c..8222f2426b47 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -781,5 +781,8 @@ extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv, + uint32_t mask); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7bc3f1783174..4f0857346bfd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3450,7 +3450,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); + snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); /* only unmask PM interrupts we need. Mask all others. */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 2e370804248f..7de29d40d1ad 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1062,10 +1062,8 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { - u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); - POSTING_READ(GEN6_PMIMR); + snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1084,10 +1082,8 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { - u32 pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE_IMR(ring, ~0); - I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); - POSTING_READ(GEN6_PMIMR); + snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } -- cgit v1.2.3 From f52ecbcf8009ef18cda86b30efd837338cd25392 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:14 -0300 Subject: drm/i915: don't update GEN6_PMIMR when it's not needed I did some brief tests and the "new_val = pmimr" condition usually happens a few times after exiting games. Note: This is also prep work to track the GEN6_PMIMR register state in dev_priv->pm_imr. This happens in the next patch. Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi [danvet: Add note to explain why we want this, as per the discussion between Chris and Paulo.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index af5c335a69db..efe3fc671e1e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -142,14 +142,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask) { - uint32_t pmimr = I915_READ(GEN6_PMIMR); - pmimr &= ~interrupt_mask; - pmimr |= (~enabled_irq_mask & interrupt_mask); + uint32_t pmimr, new_val; assert_spin_locked(&dev_priv->irq_lock); - I915_WRITE(GEN6_PMIMR, pmimr); - POSTING_READ(GEN6_PMIMR); + pmimr = new_val = I915_READ(GEN6_PMIMR); + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != pmimr) { + I915_WRITE(GEN6_PMIMR, new_val); + POSTING_READ(GEN6_PMIMR); + } } void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) -- cgit v1.2.3 From 605cd25b1ffa09a2f86b5c4bd120086dd5ea10a7 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:15 -0300 Subject: drm/i915: add dev_priv->pm_irq_mask Just like irq_mask and gt_irq_mask, use it to track the status of GEN6_PMIMR so we don't need to read it again every time we call snb_update_pm_irq. Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9bf28010c231..f423a94c1b25 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1123,6 +1123,7 @@ typedef struct drm_i915_private { /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask; u32 gt_irq_mask; + u32 pm_irq_mask; struct work_struct hotplug_work; bool enable_hotplug_processing; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index efe3fc671e1e..8872e1955c45 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -142,16 +142,17 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask) { - uint32_t pmimr, new_val; + uint32_t new_val; assert_spin_locked(&dev_priv->irq_lock); - pmimr = new_val = I915_READ(GEN6_PMIMR); + new_val = dev_priv->pm_irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != pmimr) { - I915_WRITE(GEN6_PMIMR, new_val); + if (new_val != dev_priv->pm_irq_mask) { + dev_priv->pm_irq_mask = new_val; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); POSTING_READ(GEN6_PMIMR); } } @@ -2217,8 +2218,9 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) if (HAS_VEBOX(dev)) pm_irqs |= PM_VEBOX_USER_INTERRUPT; + dev_priv->pm_irq_mask = 0xffffffff; I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); - I915_WRITE(GEN6_PMIMR, 0xffffffff); + I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); I915_WRITE(GEN6_PMIER, pm_irqs); POSTING_READ(GEN6_PMIER); } -- cgit v1.2.3 From 333a820416ccb0e24974b6ebe7d447c0c28c7b76 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 6 Aug 2013 18:57:16 -0300 Subject: drm/i915: don't disable/reenable IVB error interrupts when not needed If the error interrupts are already disabled, don't disable and reenable them. 
This is going to be needed when we're in PC8+, where all the interrupts are disabled so we won't risk re-enabling DE_ERR_INT_IVB. v2: Use dev_priv->irq_mask (Chris) Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8872e1955c45..976113af1859 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1373,6 +1373,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; + bool err_int_reenable = false; atomic_inc(&dev_priv->irq_received); @@ -1401,7 +1402,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) * handler. */ if (IS_HASWELL(dev)) { spin_lock(&dev_priv->irq_lock); - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; + if (err_int_reenable) + ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); spin_unlock(&dev_priv->irq_lock); } @@ -1437,7 +1440,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) } } - if (IS_HASWELL(dev)) { + if (err_int_reenable) { spin_lock(&dev_priv->irq_lock); if (ivb_can_enable_err_int(dev)) ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); -- cgit v1.2.3 From 60611c137641af41895828cfc74f5be64ed69b49 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 15 Aug 2013 11:50:01 -0300 Subject: drm/i915: don't queue PM events we won't process On SNB/IVB/VLV we only call gen6_rps_irq_handler if one of the IIR bits set is part of GEN6_PM_RPS_EVENTS, but at gen6_rps_irq_handler we add all the enabled IIR bits to the work queue, not only the ones that are part of GEN6_PM_RPS_EVENTS. But then gen6_pm_rps_work only processes GEN6_PM_RPS_EVENTS, so it's useless to add anything that's not GEN6_PM_RPS_EVENTS to the work queue. As a bonus, gen6_rps_irq_handler looks more similar to hsw_pm_irq_handler, so we may be able to merge them in the future. v2: - Add a WARN in case we queued something we're not going to process. Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky (v1) Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 976113af1859..c10d2f1af0be 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -789,6 +789,9 @@ static void gen6_pm_rps_work(struct work_struct *work) snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); + /* Make sure we didn't queue anything we're not going to process. 
*/ + WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); + if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) return; @@ -959,7 +962,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, */ spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir; + dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir); spin_unlock(&dev_priv->irq_lock); @@ -1128,7 +1131,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) gmbus_irq_handler(dev); - if (pm_iir & GEN6_PM_RPS_EVENTS) + if (pm_iir) gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); @@ -1433,7 +1436,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (pm_iir) { if (IS_HASWELL(dev)) hsw_pm_irq_handler(dev_priv, pm_iir); - else if (pm_iir & GEN6_PM_RPS_EVENTS) + else gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; -- cgit v1.2.3 From 4d3b3d5fd7d42a522a6c444388826bb23264db9f Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 9 Aug 2013 17:04:36 -0300 Subject: drm/i915: fix how we mask PMIMR when adding work to the queue It seems we've been doing this ever since we started processing the RPS events on a work queue, on commit "drm/i915: move gen6 rps handling to workqueue", 4912d04193733a825216b926ffd290fada88ab07. The problem is: when we add work to the queue, instead of just masking the bits we queued and leaving all the others on their current state, we mask the bits we queued and unmask all the others. This basically means we'll be unmasking a bunch of interrupts we're not going to process. And if you look at gen6_pm_rps_work, we unmask back only GEN6_PM_RPS_EVENTS, which means the bits we unmasked when adding work to the queue will remain unmasked after we process the queue. Notice that even though we unmask those unrelated interrupts, we never enable them on IER, so they don't fire our interrupt handler, they just stay there on IIR waiting to be cleared when something else triggers the interrupt handler. So this patch does what seems to make more sense: mask only the bits we add to the queue, without unmasking anything else, and so we'll unmask them after we process the queue. As a side effect we also have to remove that WARN, because it is not only making sure we don't mask useful interrupts, it is also making sure we do unmask useless interrupts! That piece of code should not be responsible for knowing which bits should be unmasked, so just don't assert anything, and trust that snb_disable_pm_irq should be doing the right thing. With i915.enable_pc8=1 I was getting ocasional "GEN6_PMIIR is not 0" error messages due to the fact that we unmask those unrelated interrupts but don't enable them. Note: if bugs start bisecting to this patch, then it probably means someone was relying on the fact that we unmask everything by accident, then we should fix gen5_gt_irq_postinstall or whoever needs the accidentally unmasked interrupts. Or maybe I was just wrong and we need to revert this patch :) Note: This started to be a more real issue with the addition of the VEBOX support since now we do enable more than just the minimal set of RPS interrupts in the IER register. Which means after the first rps interrupt has happened we will never mask the VEBOX user interrupts again and so will blow through cpu time needlessly when running video workloads. Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky [danvet: Add note that this started to matter with VEBOX much more.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c10d2f1af0be..e0c6f7d6189d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -167,11 +167,6 @@ void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) snb_update_pm_irq(dev_priv, mask, 0); } -static void snb_set_pm_irq(struct drm_i915_private *dev_priv, uint32_t val) -{ - snb_update_pm_irq(dev_priv, 0xffffffff, ~val); -} - static bool ivb_can_enable_err_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -963,7 +958,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir); + snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1046,9 +1041,7 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, if (pm_iir & GEN6_PM_RPS_EVENTS) { spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir); - /* never want to mask useful interrupts. */ - WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); + snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); -- cgit v1.2.3 From 1403c0d4d46f2eed2ab13b89561c853988ad7513 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 15 Aug 2013 11:51:32 -0300 Subject: drm/i915: merge HSW and SNB PM irq handlers Because hsw_pm_irq_handler does exactly what gen6_rps_irq_handler does and also processes the 2 additional VEBOX bits. So merge those functions and wrap the VEBOX bits on a HAS_VEBOX check. This check isn't really necessary since the bits are reserved on SNB/IVB/VLV, but it's a good documentation on who uses them. v2: - Change IS_HASWELL check to HAS_VEBOX Signed-off-by: Paulo Zanoni Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 50 ++++++++++------------------------------- 1 file changed, 12 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e0c6f7d6189d..caf83da17bb0 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -942,28 +942,6 @@ static void snb_gt_irq_handler(struct drm_device *dev, ivybridge_parity_error_irq_handler(dev); } -/* Legacy way of handling PM interrupts */ -static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, - u32 pm_iir) -{ - /* - * IIR bits should never already be set because IMR should - * prevent an interrupt from being shown in IIR. The warning - * displays a case where we've unsafely cleared - * dev_priv->rps.pm_iir. Although missing an interrupt of the same - * type is not a problem, it displays a problem in the logic. - * - * The mask bit in IMR is cleared by dev_priv->rps.work. 
- */ - - spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); - spin_unlock(&dev_priv->irq_lock); - - queue_work(dev_priv->wq, &dev_priv->rps.work); -} - #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 @@ -1030,13 +1008,10 @@ static void dp_aux_irq_handler(struct drm_device *dev) wake_up_all(&dev_priv->gmbus_wait_queue); } -/* Unlike gen6_rps_irq_handler() from which this function is originally derived, - * we must be able to deal with other PM interrupts. This is complicated because - * of the way in which we use the masks to defer the RPS work (which for - * posterity is necessary because of forcewake). - */ -static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, - u32 pm_iir) +/* The RPS events need forcewake, so we add them to a work queue and mask their + * IMR bits until the work is done. Other interrupts can be processed without + * the work queue. */ +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { if (pm_iir & GEN6_PM_RPS_EVENTS) { spin_lock(&dev_priv->irq_lock); @@ -1047,12 +1022,14 @@ static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, queue_work(dev_priv->wq, &dev_priv->rps.work); } - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); + if (HAS_VEBOX(dev_priv->dev)) { + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { - DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); - i915_handle_error(dev_priv->dev, false); + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { + DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); + i915_handle_error(dev_priv->dev, false); + } } } @@ -1427,10 +1404,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (INTEL_INFO(dev)->gen >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { - if (IS_HASWELL(dev)) - hsw_pm_irq_handler(dev_priv, pm_iir); - else - gen6_rps_irq_handler(dev_priv, pm_iir); + gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; } -- cgit v1.2.3 From 5032d871f7d300aee10c309ea004eb4f851553fe Mon Sep 17 00:00:00 2001 From: Rafael Barbalho Date: Wed, 21 Aug 2013 17:10:51 +0100 Subject: drm/i915: Cleaning up the relocate entry function As the relocate entry function was getting a bit too big I've moved the code that used to use either the cpu or the gtt to for the relocation into two separate functions. 
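Whichever path is taken, the operation being factored out is the same: write the 32-bit presumed address (the target's offset plus the relocation delta) into the batch object at reloc->offset; the two helpers only differ in how that page gets mapped. A minimal sketch of the write itself against an ordinary byte buffer (illustrative names, not the driver's API):

#include <stdint.h>
#include <string.h>

/* Apply one relocation to an in-memory copy of a batch buffer. The real
 * driver does this through kmap_atomic (CPU path) or an io_mapping of the
 * GTT aperture (GTT path), as split out in the patch below. */
static void apply_reloc(uint8_t *batch, size_t batch_size,
                        uint64_t reloc_offset, uint32_t target_offset,
                        uint32_t delta)
{
        uint32_t value = target_offset + delta;

        if (reloc_offset + sizeof(value) > batch_size)
                return;         /* out of range; real code rejects this earlier */

        memcpy(batch + reloc_offset, &value, sizeof(value));
}

int main(void)
{
        uint8_t batch[16] = {0};
        uint32_t check;

        apply_reloc(batch, sizeof(batch), 8, 0x1000, 0x20);
        memcpy(&check, batch + 8, sizeof(check));
        return check == 0x1020 ? 0 : 1;
}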
Signed-off-by: Rafael Barbalho Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88 ++++++++++++++++++------------ 1 file changed, 54 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7dcf78cf6781..792c52a235ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -171,6 +171,56 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) obj->cache_level != I915_CACHE_NONE); } +static int +relocate_entry_cpu(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc) +{ + uint32_t page_offset = offset_in_page(reloc->offset); + char *vaddr; + int ret = -EINVAL; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + + return 0; +} + +static int +relocate_entry_gtt(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + int ret = -EINVAL; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + + /* Map the page containing the relocation we're going to perform. */ + reloc->offset += i915_gem_obj_ggtt_offset(obj); + reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + offset_in_page(reloc->offset)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + + return 0; +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_objects *eb, @@ -255,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -EFAULT; reloc->delta += target_offset; - if (use_cpu_reloc(obj)) { - uint32_t page_offset = offset_in_page(reloc->offset); - char *vaddr; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) - return ret; - - vaddr = kmap_atomic(i915_gem_object_get_page(obj, - reloc->offset >> PAGE_SHIFT)); - *(uint32_t *)(vaddr + page_offset) = reloc->delta; - kunmap_atomic(vaddr); - } else { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; - - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; - - /* Map the page containing the relocation we're going to perform. 
*/ - reloc->offset += i915_gem_obj_ggtt_offset(obj); - reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc->offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + offset_in_page(reloc->offset)); - iowrite32(reloc->delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - } + if (use_cpu_reloc(obj)) + ret = relocate_entry_cpu(obj, reloc); + else + ret = relocate_entry_gtt(obj, reloc); /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; -- cgit v1.2.3 From 3414caf63421762e57b26aa999e5187b42ee1606 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 21 Aug 2013 08:08:55 -0700 Subject: drm/i915: drop WaMbcDriverBootEnable workaround MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turns out the BIOS will do this for us as needed, and if we try to do it again we risk hangs or other bad behavior. Note that this seems to break libva on ChromeOS after resumes (but strangely _not_ after booting up). This essentially reverts commit b4ae3f22d238617ca11610b29fde16cf8c0bc6e0 Author: Jesse Barnes Date: Thu Jun 14 11:04:48 2012 -0700 drm/i915: load boot context at driver init time and commit b3bf076697a68a8577f4a5f7407de0bb2b3b56ac Author: Paulo Zanoni Date: Tue Nov 20 13:27:44 2012 -0200 drm/i915: implement WaMbcDriverBootEnable on Haswell Signed-off-by: Jesse Barnes Reported-and-Tested-by: Stéphane Marchesin [danvet: Add note about impact and regression citation.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4f0857346bfd..cbab95dce352 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4864,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) ILK_DPARBUNIT_CLOCK_GATE_ENABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); - /* WaMbcDriverBootEnable:snb */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - g4x_disable_trickle_feed(dev); /* The default value should be 0x200 according to docs, but the two @@ -4963,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - /* WaMbcDriverBootEnable:hsw */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -5050,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) g4x_disable_trickle_feed(dev); - /* WaMbcDriverBootEnable:ivb */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* WaVSRefCountFullforceMissDisable:ivb */ gen7_setup_fixed_func_scheduler(dev_priv); @@ -5113,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - /* WaMbcDriverBootEnable:vlv */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. 
Failure to set it results in * flickering pixels due to Z write ordering failures after -- cgit v1.2.3 From 215733fadb87709e91b3a622d786865292c9ab11 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:07 -0300 Subject: drm/i915: grab force_wake when restoring LCPLL If LCPLL is disabled, there's a chance we might be in package C8 state or deeper, and we'll get a hard hang when restoring LCPLL (also, a red led lights up on my motherboard). So grab the force_wake, which will get us out of RC6 and, as a consequence, out of PC8+ (since we need RC6 to get into PC8+). Note: Discussions with hw designers are still ongoing what exactly goes boom here. But I think we can go ahead and just merge this little hack for now until it's clear what we actually need. Signed-off-by: Paulo Zanoni [danvet: Add small note about the current state of the discussion around this hack.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7a40427823c7..a2c8cb360ae8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6033,6 +6033,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv) LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) return; + /* Make sure we're not on PC8 state before disabling PC8, otherwise + * we'll hang the machine! */ + dev_priv->uncore.funcs.force_wake_get(dev_priv); + if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; I915_WRITE(LCPLL_CTL, val); @@ -6060,6 +6064,8 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv) LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); } + + dev_priv->uncore.funcs.force_wake_put(dev_priv); } static void haswell_modeset_global_resources(struct drm_device *dev) -- cgit v1.2.3 From bd633a7c1ca0663ba10426a0a6aeda0257cbe804 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:08 -0300 Subject: drm/i915: fix SDEIMR assertion when disabling LCPLL This was causing WARNs in one machine, so instead of trying to guess exactly which hotplug bits should exist, just do the test on the non-HPD bits. We don't care about the state of the hotplug bits, we just care about the others, that need to be 1. 
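The replacement check reads as "every bit outside the hotplug group must be masked": OR the don't-care bits up to 1 and the comparison against all-ones only fails if some other interrupt is left unmasked. A tiny standalone illustration of the idiom (the hotplug mask value below is a made-up stand-in, not the real SDE_HOTPLUG_MASK_CPT):

#include <assert.h>
#include <stdint.h>

/* Returns 1 iff every bit of 'val' outside 'dont_care' is set. */
static int all_set_outside(uint32_t val, uint32_t dont_care)
{
        return (val | dont_care) == 0xffffffff;
}

int main(void)
{
        uint32_t hpd = 0x00ff0000;      /* illustrative hotplug bit group */

        assert(all_set_outside(0xffffffff, hpd));   /* fully masked: fine */
        assert(all_set_outside(0xff00ffff, hpd));   /* only HPD unmasked: fine */
        assert(!all_set_outside(0xff00fffe, hpd));  /* stray unmasked bit: caught */
        return 0;
}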
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2c8cb360ae8..fc92773ef552 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5932,11 +5932,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) struct intel_ddi_plls *plls = &dev_priv->ddi_plls; struct intel_crtc *crtc; unsigned long irqflags; - uint32_t val, pch_hpd_mask; - - pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT; - if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)) - pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT; + uint32_t val; list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", @@ -5962,7 +5958,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) WARN((val & ~DE_PCH_EVENT_IVB) != val, "Unexpected DEIMR bits enabled: 0x%x\n", val); val = I915_READ(SDEIMR); - WARN((val & ~pch_hpd_mask) != val, + WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, "Unexpected SDEIMR bits enabled: 0x%x\n", val); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -- cgit v1.2.3 From c67a470b1db781c54be07a87217cff35a91f564e Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:09 -0300 Subject: drm/i915: allow package C8+ states on Haswell (disabled) This patch allows PC8+ states on Haswell. These states can only be reached when all the display outputs are disabled, and they allow some more power savings. The fact that the graphics device is allowing PC8+ doesn't mean that the machine will actually enter PC8+: all the other devices also need to allow PC8+. For now this option is disabled by default. You need i915.allow_pc8=1 if you want it. This patch adds a big comment inside i915_drv.h explaining how it works and how it tracks things. Read it. v2: (this is not really v2, many previous versions were already sent, but they had different names) - Use the new functions to enable/disable GTIMR and GEN6_PMIMR - Rename almost all variables and functions to names suggested by Chris - More WARNs on the IRQ handling code - Also disable PC8 when there's GPU work to do (thanks to Ben for the help on this), so apps can run faster - Enable PC8 on a delayed work function that is delayed for 5 seconds. This makes sure we only enable PC8+ if we're really idle - Make sure we're not in PC8+ when suspending v3: - WARN if IRQs are disabled on __wait_seqno - Replace some DRM_ERRORs with WARNs - Fix calls to restore GT and PM interrupts - Use intel_mark_busy instead of intel_ring_advance to disable PC8 v4: - Use the force_wake, Luke!
v5: - Remove the "IIR is not zero" WARNs - Move the force_wake chunk to its own patch - Only restore what's missing from RC6, not everything Signed-off-by: Paulo Zanoni Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 10 +++ drivers/gpu/drm/i915/i915_drv.c | 11 +++ drivers/gpu/drm/i915/i915_drv.h | 72 +++++++++++++++ drivers/gpu/drm/i915/i915_gem.c | 2 + drivers/gpu/drm/i915/i915_irq.c | 101 +++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 170 ++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_dp.c | 3 + drivers/gpu/drm/i915/intel_drv.h | 8 ++ drivers/gpu/drm/i915/intel_i2c.c | 2 + drivers/gpu/drm/i915/intel_pm.c | 13 ++- 10 files changed, 390 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5a051eaab9ef..09e8ef910ec5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1486,6 +1486,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->pc8.lock); + dev_priv->pc8.requirements_met = false; + dev_priv->pc8.gpu_idle = false; + dev_priv->pc8.irqs_disabled = false; + dev_priv->pc8.enabled = false; + dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ + INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); + i915_dump_device_info(dev_priv); /* Not all pre-production machines fall into this category, only the @@ -1740,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev) cancel_work_sync(&dev_priv->gpu_error.work); i915_destroy_error_state(dev); + cancel_delayed_work_sync(&dev_priv->pc8.enable_work); + if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index fd9fb2c25691..84d48b82e3f1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -141,6 +141,10 @@ module_param_named(fastboot, i915_fastboot, bool, 0600); MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " "(default: false)"); +int i915_enable_pc8 __read_mostly = 0; +module_param_named(enable_pc8, i915_enable_pc8, int, 0600); +MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: false)"); + bool i915_prefault_disable __read_mostly; module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); MODULE_PARM_DESC(prefault_disable, @@ -557,6 +561,9 @@ static int i915_drm_freeze(struct drm_device *dev) dev_priv->modeset_restore = MODESET_SUSPENDED; mutex_unlock(&dev_priv->modeset_restore_lock); + /* We do a lot of poking in a lot of registers, make sure they work + * properly. */ + hsw_disable_package_c8(dev_priv); intel_set_power_well(dev, true); drm_kms_helper_poll_disable(dev); @@ -713,6 +720,10 @@ static int __i915_drm_thaw(struct drm_device *dev) schedule_work(&dev_priv->console_resume_work); } + /* Undo what we did at i915_drm_freeze so the refcount goes back to the + * expected level. 
*/ + hsw_enable_package_c8(dev_priv); + mutex_lock(&dev_priv->modeset_restore_lock); dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f423a94c1b25..627c8216db1f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1076,6 +1076,75 @@ struct intel_wm_level { uint32_t fbc_val; }; +/* + * This struct tracks the state needed for the Package C8+ feature. + * + * Package states C8 and deeper are really deep PC states that can only be + * reached when all the devices on the system allow it, so even if the graphics + * device allows PC8+, it doesn't mean the system will actually get to these + * states. + * + * Our driver only allows PC8+ when all the outputs are disabled, the power well + * is disabled and the GPU is idle. When these conditions are met, we manually + * do the other conditions: disable the interrupts, clocks and switch LCPLL + * refclk to Fclk. + * + * When we really reach PC8 or deeper states (not just when we allow it) we lose + * the state of some registers, so when we come back from PC8+ we need to + * restore this state. We don't get into PC8+ if we're not in RC6, so we don't + * need to take care of the registers kept by RC6. + * + * The interrupt disabling is part of the requirements. We can only leave the + * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we + * can lock the machine. + * + * Ideally every piece of our code that needs PC8+ disabled would call + * hsw_disable_package_c8, which would increment disable_count and prevent the + * system from reaching PC8+. But we don't have a symmetric way to do this for + * everything, so we have the requirements_met and gpu_idle variables. When we + * switch requirements_met or gpu_idle to true we decrease disable_count, and + * increase it in the opposite case. The requirements_met variable is true when + * all the CRTCs, encoders and the power well are disabled. The gpu_idle + * variable is true when the GPU is idle. + * + * In addition to everything, we only actually enable PC8+ if disable_count + * stays at zero for at least some seconds. This is implemented with the + * enable_work variable. We do this so we don't enable/disable PC8 dozens of + * consecutive times when all screens are disabled and some background app + * queries the state of our connectors, or we have some application constantly + * waking up to use the GPU. Only after the enable_work function actually + * enables PC8+ the "enable" variable will become true, which means that it can + * be false even if disable_count is 0. + * + * The irqs_disabled variable becomes true exactly after we disable the IRQs and + * goes back to false exactly before we reenable the IRQs. We use this variable + * to check if someone is trying to enable/disable IRQs while they're supposed + * to be disabled. This shouldn't happen and we'll print some error messages in + * case it happens, but if it actually happens we'll also update the variables + * inside struct regsave so when we restore the IRQs they will contain the + * latest expected values. + * + * For more, read "Display Sequences for Package C8" on our documentation. + */ +struct i915_package_c8 { + bool requirements_met; + bool gpu_idle; + bool irqs_disabled; + /* Only true after the delayed work task actually enables it. 
*/ + bool enabled; + int disable_count; + struct mutex lock; + struct delayed_work enable_work; + + struct { + uint32_t deimr; + uint32_t sdeimr; + uint32_t gtimr; + uint32_t gtier; + uint32_t gen6_pmimr; + } regsave; +}; + typedef struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1260,6 +1329,8 @@ typedef struct drm_i915_private { uint16_t cur_latency[5]; } wm; + struct i915_package_c8 pc8; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; @@ -1635,6 +1706,7 @@ extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; extern bool i915_fastboot __read_mostly; +extern int i915_enable_pc8 __read_mostly; extern bool i915_prefault_disable __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 449575b85b31..23c42567631e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1005,6 +1005,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, bool wait_forever = true; int ret; + WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); + if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index caf83da17bb0..a03b445ceb5f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.deimr &= ~mask; + return; + } + if ((dev_priv->irq_mask & mask) != 0) { dev_priv->irq_mask &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask); @@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.deimr |= mask; + return; + } + if ((dev_priv->irq_mask & mask) != mask) { dev_priv->irq_mask |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask); @@ -116,6 +128,14 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, { assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; + dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & + interrupt_mask); + return; + } + dev_priv->gt_irq_mask &= ~interrupt_mask; dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); @@ -146,6 +166,14 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; + dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & + interrupt_mask); + return; + } + new_val = dev_priv->pm_irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); @@ -257,6 +285,15 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled && + (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; + dev_priv->pc8.regsave.sdeimr |= 
(~enabled_irq_mask & + interrupt_mask); + return; + } + I915_WRITE(SDEIMR, sdeimr); POSTING_READ(SDEIMR); } @@ -3113,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + +/* Disable interrupts so we can allow Package C8+. */ +void hsw_pc8_disable_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); + dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); + dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); + dev_priv->pc8.regsave.gtier = I915_READ(GTIER); + dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); + + ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); + ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); + ilk_disable_gt_irq(dev_priv, 0xffffffff); + snb_disable_pm_irq(dev_priv, 0xffffffff); + + dev_priv->pc8.irqs_disabled = true; + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +/* Restore interrupts so we can recover from Package C8+. */ +void hsw_pc8_restore_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + uint32_t val, expected; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + val = I915_READ(DEIMR); + expected = ~DE_PCH_EVENT_IVB; + WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); + + val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; + expected = ~SDE_HOTPLUG_MASK_CPT; + WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", + val, expected); + + val = I915_READ(GTIMR); + expected = 0xffffffff; + WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); + + val = I915_READ(GEN6_PMIMR); + expected = 0xffffffff; + WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, + expected); + + dev_priv->pc8.irqs_disabled = false; + + ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); + ibx_enable_display_interrupt(dev_priv, + ~dev_priv->pc8.regsave.sdeimr & + ~SDE_HOTPLUG_MASK_CPT); + ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); + snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); + I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fc92773ef552..1fad9de3d810 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6064,6 +6064,166 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv) dev_priv->uncore.funcs.force_wake_put(dev_priv); } +void hsw_enable_pc8_work(struct work_struct *__work) +{ + struct drm_i915_private *dev_priv = + container_of(to_delayed_work(__work), struct drm_i915_private, + pc8.enable_work); + struct drm_device *dev = dev_priv->dev; + uint32_t val; + + if (dev_priv->pc8.enabled) + return; + + DRM_DEBUG_KMS("Enabling package C8+\n"); + + dev_priv->pc8.enabled = true; + + if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } + + lpt_disable_clkout_dp(dev); + hsw_pc8_disable_interrupts(dev); + hsw_disable_lcpll(dev_priv, true, true); +} + +static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) +{ + 
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); + WARN(dev_priv->pc8.disable_count < 1, + "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); + + dev_priv->pc8.disable_count--; + if (dev_priv->pc8.disable_count != 0) + return; + + schedule_delayed_work(&dev_priv->pc8.enable_work, + msecs_to_jiffies(5 * 1000)); +} + +static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + uint32_t val; + + WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); + WARN(dev_priv->pc8.disable_count < 0, + "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); + + dev_priv->pc8.disable_count++; + if (dev_priv->pc8.disable_count != 1) + return; + + cancel_delayed_work_sync(&dev_priv->pc8.enable_work); + if (!dev_priv->pc8.enabled) + return; + + DRM_DEBUG_KMS("Disabling package C8+\n"); + + hsw_restore_lcpll(dev_priv); + hsw_pc8_restore_interrupts(dev); + lpt_init_pch_refclk(dev); + + if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val |= PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } + + intel_prepare_ddi(dev); + i915_gem_init_swizzling(dev); + mutex_lock(&dev_priv->rps.hw_lock); + gen6_update_ring_freq(dev); + mutex_unlock(&dev_priv->rps.hw_lock); + dev_priv->pc8.enabled = false; +} + +void hsw_enable_package_c8(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->pc8.lock); + __hsw_enable_package_c8(dev_priv); + mutex_unlock(&dev_priv->pc8.lock); +} + +void hsw_disable_package_c8(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->pc8.lock); + __hsw_disable_package_c8(dev_priv); + mutex_unlock(&dev_priv->pc8.lock); +} + +static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + uint32_t val; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) + if (crtc->base.enabled) + return false; + + /* This case is still possible since we have the i915.disable_power_well + * parameter and also the KVMr or something else might be requesting the + * power well. */ + val = I915_READ(HSW_PWR_WELL_DRIVER); + if (val != 0) { + DRM_DEBUG_KMS("Not enabling PC8: power well on\n"); + return false; + } + + return true; +} + +/* Since we're called from modeset_global_resources there's no way to + * symmetrically increase and decrease the refcount, so we use + * dev_priv->pc8.requirements_met to track whether we already have the refcount + * or not. 
+ */ +static void hsw_update_package_c8(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool allow; + + if (!i915_enable_pc8) + return; + + mutex_lock(&dev_priv->pc8.lock); + + allow = hsw_can_enable_package_c8(dev_priv); + + if (allow == dev_priv->pc8.requirements_met) + goto done; + + dev_priv->pc8.requirements_met = allow; + + if (allow) + __hsw_enable_package_c8(dev_priv); + else + __hsw_disable_package_c8(dev_priv); + +done: + mutex_unlock(&dev_priv->pc8.lock); +} + +static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->pc8.gpu_idle) { + dev_priv->pc8.gpu_idle = true; + hsw_enable_package_c8(dev_priv); + } +} + +static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) +{ + if (dev_priv->pc8.gpu_idle) { + dev_priv->pc8.gpu_idle = false; + hsw_disable_package_c8(dev_priv); + } +} + static void haswell_modeset_global_resources(struct drm_device *dev) { bool enable = false; @@ -6079,6 +6239,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev) } intel_set_power_well(dev, enable); + + hsw_update_package_c8(dev); } static int haswell_crtc_mode_set(struct drm_crtc *crtc, @@ -7310,13 +7472,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) void intel_mark_busy(struct drm_device *dev) { - i915_update_gfx_val(dev->dev_private); + struct drm_i915_private *dev_priv = dev->dev_private; + + hsw_package_c8_gpu_busy(dev_priv); + i915_update_gfx_val(dev_priv); } void intel_mark_idle(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; + hsw_package_c8_gpu_idle(dev_priv); + if (!i915_powersave) return; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2726d4d41722..2151d13772b8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -344,6 +344,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, else precharge = 5; + intel_aux_display_runtime_get(dev_priv); + /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = I915_READ_NOTRACE(ch_ctl); @@ -434,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ret = recv_bytes; out: pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); + intel_aux_display_runtime_put(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8222f2426b47..176080822a74 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -745,6 +745,7 @@ extern void intel_set_power_well(struct drm_device *dev, bool enable); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); extern void ironlake_teardown_rc6(struct drm_device *dev); +void gen6_update_ring_freq(struct drm_device *dev); extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); @@ -784,5 +785,12 @@ extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void hsw_enable_pc8_work(struct work_struct *__work); +extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv); +extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv); +extern void hsw_pc8_disable_interrupts(struct drm_device *dev); +extern void hsw_pc8_restore_interrupts(struct drm_device *dev); +extern void 
intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); +extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 639fe192997c..d1c1e0f7f262 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter, int i, reg_offset; int ret = 0; + intel_aux_display_runtime_get(dev_priv); mutex_lock(&dev_priv->gmbus_mutex); if (bus->force_bit) { @@ -497,6 +498,7 @@ timeout: out: mutex_unlock(&dev_priv->gmbus_mutex); + intel_aux_display_runtime_put(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cbab95dce352..0150ba598bf0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3607,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_gt_force_wake_put(dev_priv); } -static void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; @@ -5398,6 +5398,17 @@ void intel_init_power_well(struct drm_device *dev) I915_WRITE(HSW_PWR_WELL_BIOS, 0); } +/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */ +void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) +{ + hsw_disable_package_c8(dev_priv); +} + +void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) +{ + hsw_enable_package_c8(dev_priv); +} + /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_device *dev) { -- cgit v1.2.3 From 371db66add2ef701abd3f4295c4cd6bbc24cd5ca Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:10 -0300 Subject: drm/i915: add i915_pc8_status debugfs file Make it print the value of the variables on the PC8 struct. v2: Update to recent renames and add the new fields. 
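For quick checks from userspace the new entry can be read like any other debugfs file; something along these lines is enough (the path assumes debugfs is mounted at /sys/kernel/debug and that the device is DRM minor 0, and reading it normally requires root):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/dri/0/i915_pc8_status"; /* assumed path */
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}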
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 236d97e51c3a..39df30e7d9af 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1788,6 +1788,31 @@ static int i915_energy_uJ(struct seq_file *m, void *data) power *= units; seq_printf(m, "%llu", (long long unsigned)power); + + return 0; +} + +static int i915_pc8_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!IS_HASWELL(dev)) { + seq_puts(m, "not supported\n"); + return 0; + } + + mutex_lock(&dev_priv->pc8.lock); + seq_printf(m, "Requirements met: %s\n", + yesno(dev_priv->pc8.requirements_met)); + seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle)); + seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count); + seq_printf(m, "IRQs disabled: %s\n", + yesno(dev_priv->pc8.irqs_disabled)); + seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled)); + mutex_unlock(&dev_priv->pc8.lock); + return 0; } @@ -2231,6 +2256,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, {"i915_energy_uJ", i915_energy_uJ, 0}, + {"i915_pc8_status", i915_pc8_status, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) -- cgit v1.2.3 From 900587453219f6090a1e28db1bb790aa64820131 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:11 -0300 Subject: drm/i915: add i915.pc8_timeout function We currently only enter PC8+ after all its required conditions are met, there's no rendering, and we stay like that for at least 5 seconds. I chose "5 seconds" because this value is conservative and won't make us enter/leave PC8+ thousands of times after the screen is off: some desktop environments have applications that wake up and do rendering every 1-3 seconds, even when the screen is off and the machine is completely idle. But when I was testing my PC8+ patches I set the default value to 100ms so I could use the bad-behaving desktop environments to stress-test my patches. I also thought it would be a good idea to ask our power management team to test different values, but I'm pretty sure they would ask me for an easy way to change the timeout. So to help these 2 cases I decided to create an option that would make it easier to change the default value. I also expect people making specific products that use our driver could try to find the perfect timeout for them. Anyway, fixing the bad-behaving applications will always lead to better power savings than just changing the timeout value: you need to stop waking the Kernel, not quickly put it back to sleep again after you wake it for nothing. Bad sleep leads to bad mood! 
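To illustrate the mechanism the timeout feeds into, here is a stripped-down sketch of the same pattern written against the generic workqueue API; the demo_* names and the module layout are hypothetical, and the real logic lives in the PC8 code touched by the diff below.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static int demo_timeout __read_mostly = 5000;	/* plays the role of i915.pc8_timeout, in msecs */
module_param(demo_timeout, int, 0600);

static struct delayed_work demo_enable_work;

static void demo_enable_fn(struct work_struct *work)
{
	/* actually enter the low-power state here */
}

static void demo_request_enable(void)
{
	/* defer the transition; this is a no-op if the work is already pending */
	schedule_delayed_work(&demo_enable_work,
			      msecs_to_jiffies(demo_timeout));
}

static void demo_request_disable(void)
{
	/* leave (or never enter) the low-power state right away */
	cancel_delayed_work_sync(&demo_enable_work);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_enable_work, demo_enable_fn);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");

Because the parameter is registered with 0600 permissions, the value can also be changed at runtime through the usual module parameter sysfs node, which is exactly what the pc8_timeout parameter added below allows.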
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 84d48b82e3f1..6dc00a190669 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -145,6 +145,10 @@ int i915_enable_pc8 __read_mostly = 0; module_param_named(enable_pc8, i915_enable_pc8, int, 0600); MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: false)"); +int i915_pc8_timeout __read_mostly = 5000; +module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600); +MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)"); + bool i915_prefault_disable __read_mostly; module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); MODULE_PARM_DESC(prefault_disable, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 627c8216db1f..5f8a638c5145 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1707,6 +1707,7 @@ extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; extern bool i915_fastboot __read_mostly; extern int i915_enable_pc8 __read_mostly; +extern int i915_pc8_timeout __read_mostly; extern bool i915_prefault_disable __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1fad9de3d810..b29954ac6bfd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6101,7 +6101,7 @@ static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) return; schedule_delayed_work(&dev_priv->pc8.enable_work, - msecs_to_jiffies(5 * 1000)); + msecs_to_jiffies(i915_pc8_timeout)); } static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From e27e9708c45879f16fb824a2da94cd65e150a0c8 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 19 Aug 2013 13:18:12 -0300 Subject: drm/i915: enable Package C8+ by default This should be working, so enable it by default. Also easy to revert. v2: Rebase, s/allow/enable/. 
Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6dc00a190669..beb295634a49 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -141,9 +141,9 @@ module_param_named(fastboot, i915_fastboot, bool, 0600); MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " "(default: false)"); -int i915_enable_pc8 __read_mostly = 0; +int i915_enable_pc8 __read_mostly = 1; module_param_named(enable_pc8, i915_enable_pc8, int, 0600); -MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: false)"); +MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)"); int i915_pc8_timeout __read_mostly = 5000; module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600); -- cgit v1.2.3 From 35d8f2eb259e2d32c4bb67e9733ba0cba031f64f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Aug 2013 23:38:08 +0200 Subject: drm/i915: Use POSTING_READ in lcpll code If we don't use the return value of a mmio read our coding style is to use the POSTING_READ macro. This avoids cluttering the mmio traces. While at it add the missing posting read in the lcpll enable function that Paulo spotted. v2: Drop the _NOTRACE changes, tracing such wait_for loops in the modeset code might actually be rather useful! Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b29954ac6bfd..b4daa640a6d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6036,13 +6036,14 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv) if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); } val = I915_READ(D_COMP); val |= D_COMP_COMP_FORCE; val &= ~D_COMP_COMP_DISABLE; I915_WRITE(D_COMP, val); - I915_READ(D_COMP); + POSTING_READ(D_COMP); val = I915_READ(LCPLL_CTL); val &= ~LCPLL_PLL_DISABLE; -- cgit v1.2.3 From e8016055335687b90e7cd5bbfa30e0c269417f34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 22 Aug 2013 19:23:13 +0300 Subject: drm/i915: Fix context size calculation on SNB/IVB/VLV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All the different context sizes reported in the CXT_SIZE register aren't meant to be simply added together. While BSpec is somewhat unclear on the topic of the actual context size, empirical tests have now revealed the truth. So let's add a big fat comment to remind people how it all works. As a result of correctly interpreting CXT_SIZE, the IVB context size is reduced from three pages to two, while SNB context size remains at two pages. 
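As a worked illustration of why the total changes (every field value below is invented purely for the example, not read from hardware):

/*
 * Hypothetical SNB/IVB CXT_SIZE decode, only to show which fields the new
 * GEN6_CXT_TOTAL_SIZE() still sums:
 *
 *   POWER    = 0x10   saved elsewhere (LLC or stolen), so not counted
 *   RING     = 0x05   counted
 *   RENDER   = 0x22   already included in EXTENDED, so not counted
 *   EXTENDED = 0x2a   counted
 *   PIPELINE = 0x03   counted
 *
 * old macro: 0x10 + 0x05 + 0x22 + 0x2a + 0x03 = 0x64
 * new macro:        0x05 +        0x2a + 0x03 = 0x32
 *
 * Dropping the double-counted render size and the separately-saved power
 * context is what shrinks the computed context allocation.
 */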
Signed-off-by: Ville Syrjälä Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c4c509895826..76d965c38d7e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1705,15 +1705,26 @@ */ #define CCID 0x2180 #define CCID_EN (1<<0) +/* + * Notes on SNB/IVB/VLV context size: + * - Power context is saved elsewhere (LLC or stolen) + * - Ring/execlist context is saved on SNB, not on IVB + * - Extended context size already includes render context size + * - We always need to follow the extended context size. + * SNB BSpec has comments indicating that we should use the + * render context size instead if execlists are disabled, but + * based on empirical testing that's just nonsense. + * - Pipelined/VF state is saved on SNB/IVB respectively + * - GT1 size just indicates how much of render context + * doesn't need saving on GT1 + */ #define CXT_SIZE 0x21a0 #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) -#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ - GEN6_CXT_RING_SIZE(cxt_reg) + \ - GEN6_CXT_RENDER_SIZE(cxt_reg) + \ +#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) #define GEN7_CXT_SIZE 0x21a8 @@ -1723,11 +1734,7 @@ #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) -#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ - GEN7_CXT_RING_SIZE(ctx_reg) + \ - GEN7_CXT_RENDER_SIZE(ctx_reg) + \ - GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ - GEN7_CXT_GT1_SIZE(ctx_reg) + \ +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_VFSTATE_SIZE(ctx_reg)) /* Haswell does have the CXT_SIZE register however it does not appear to be * valid. Now, docs explain in dwords what is in the context object. The full -- cgit v1.2.3 From fb1ae911f4e58c2cf28fcd48b59f54d17283da07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 22 Aug 2013 19:21:30 +0300 Subject: drm/i915: Print seqnos as unsigned in debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I don't like seeing signed seqnos. Make them unsigned. 
Signed-off-by: Ville Syrjälä Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 39df30e7d9af..55ab9246e1b9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -100,7 +100,7 @@ static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { struct i915_vma *vma; - seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), -- cgit v1.2.3 From 0cf6c71d70d8aa39b8fd0e39c9009602a0e0d300 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 24 Jun 2013 17:12:04 -0400 Subject: drm/msm: add register definitions Generated from rnndb files in: https://github.com/freedreno/envytools Keep this split out as a separate commit to make it easier to review the actual driver. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.xml.h | 502 ++++++++++++++++ drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 114 ++++ drivers/gpu/drm/msm/dsi/sfpb.xml.h | 48 ++ drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 508 ++++++++++++++++ drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 50 ++ drivers/gpu/drm/msm/mdp4/mdp4.xml.h | 1061 +++++++++++++++++++++++++++++++++ 6 files changed, 2283 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi/dsi.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/mmss_cc.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/sfpb.xml.h create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.xml.h create mode 100644 drivers/gpu/drm/msm/hdmi/qfprom.xml.h create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4.xml.h diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h new file mode 100644 index 000000000000..6f8396be431d --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -0,0 +1,502 @@ +#ifndef DSI_XML +#define DSI_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum dsi_traffic_mode { + NON_BURST_SYNCH_PULSE = 0, + NON_BURST_SYNCH_EVENT = 1, + BURST_MODE = 2, +}; + +enum dsi_dst_format { + DST_FORMAT_RGB565 = 0, + DST_FORMAT_RGB666 = 1, + DST_FORMAT_RGB666_LOOSE = 2, + DST_FORMAT_RGB888 = 3, +}; + +enum dsi_rgb_swap { + SWAP_RGB = 0, + SWAP_RBG = 1, + SWAP_BGR = 2, + SWAP_BRG = 3, + SWAP_GRB = 4, + SWAP_GBR = 5, +}; + +enum dsi_cmd_trigger { + TRIGGER_NONE = 0, + TRIGGER_TE = 2, + TRIGGER_SW = 4, + TRIGGER_SW_SEOF = 5, + TRIGGER_SW_TE = 6, +}; + +#define DSI_IRQ_CMD_DMA_DONE 0x00000001 +#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 +#define DSI_IRQ_CMD_MDP_DONE 0x00000100 +#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200 +#define DSI_IRQ_VIDEO_DONE 0x00010000 +#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000 +#define DSI_IRQ_ERROR 0x01000000 +#define DSI_IRQ_MASK_ERROR 0x02000000 +#define REG_DSI_CTRL 0x00000000 +#define DSI_CTRL_ENABLE 0x00000001 +#define DSI_CTRL_VID_MODE_EN 0x00000002 +#define DSI_CTRL_CMD_MODE_EN 0x00000004 +#define DSI_CTRL_LANE0 0x00000010 +#define DSI_CTRL_LANE1 0x00000020 +#define DSI_CTRL_LANE2 0x00000040 +#define DSI_CTRL_LANE3 0x00000080 +#define DSI_CTRL_CLK_EN 0x00000100 +#define DSI_CTRL_ECC_CHECK 0x00100000 +#define DSI_CTRL_CRC_CHECK 0x01000000 + +#define REG_DSI_STATUS0 0x00000004 +#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002 +#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008 +#define DSI_STATUS0_DSI_BUSY 0x00000010 + +#define REG_DSI_FIFO_STATUS 0x00000008 + +#define REG_DSI_VID_CFG0 0x0000000c +#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003 +#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0 +static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val) +{ + return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK; +} +#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030 +#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4 +static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val) +{ + return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK; +} +#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300 +#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8 +static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val) +{ + return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK; +} +#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000 +#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000 +#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000 +#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000 +#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000 +#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000 + +#define REG_DSI_VID_CFG1 0x0000001c +#define DSI_VID_CFG1_R_SEL 0x00000010 +#define DSI_VID_CFG1_G_SEL 0x00000100 +#define DSI_VID_CFG1_B_SEL 0x00001000 +#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000 +#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16 +static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK; +} +#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000 +#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20 +static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val) +{ + return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK; +} + +#define REG_DSI_ACTIVE_H 0x00000020 +#define DSI_ACTIVE_H_START__MASK 0x00000fff +#define DSI_ACTIVE_H_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_H_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK; +} +#define DSI_ACTIVE_H_END__MASK 0x0fff0000 +#define 
DSI_ACTIVE_H_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_H_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK; +} + +#define REG_DSI_ACTIVE_V 0x00000024 +#define DSI_ACTIVE_V_START__MASK 0x00000fff +#define DSI_ACTIVE_V_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_V_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK; +} +#define DSI_ACTIVE_V_END__MASK 0x0fff0000 +#define DSI_ACTIVE_V_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_V_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK; +} + +#define REG_DSI_TOTAL 0x00000028 +#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff +#define DSI_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK; +} +#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000 +#define DSI_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK; +} + +#define REG_DSI_ACTIVE_HSYNC 0x0000002c +#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff +#define DSI_ACTIVE_HSYNC_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK; +} +#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000 +#define DSI_ACTIVE_HSYNC_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK; +} + +#define REG_DSI_ACTIVE_VSYNC 0x00000034 +#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff +#define DSI_ACTIVE_VSYNC_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK; +} +#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000 +#define DSI_ACTIVE_VSYNC_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK; +} + +#define REG_DSI_CMD_DMA_CTRL 0x00000038 +#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000 +#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000 + +#define REG_DSI_CMD_CFG0 0x0000003c + +#define REG_DSI_CMD_CFG1 0x00000040 + +#define REG_DSI_DMA_BASE 0x00000044 + +#define REG_DSI_DMA_LEN 0x00000048 + +#define REG_DSI_ACK_ERR_STATUS 0x00000064 + +static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; } + +static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; } + +#define REG_DSI_TRIG_CTRL 0x00000080 +#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f +#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0 +static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val) +{ + return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK; +} +#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0 +#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4 +static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val) +{ + return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK; +} +#define DSI_TRIG_CTRL_STREAM 0x00000100 +#define DSI_TRIG_CTRL_TE 0x80000000 + +#define REG_DSI_TRIG_DMA 0x0000008c + +#define REG_DSI_DLN0_PHY_ERR 0x000000b0 + +#define REG_DSI_TIMEOUT_STATUS 0x000000bc + +#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0 +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f +#define 
DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0 +static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val) +{ + return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK; +} +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00 +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8 +static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) +{ + return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK; +} + +#define REG_DSI_EOT_PACKET_CTRL 0x000000c8 +#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001 +#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010 + +#define REG_DSI_LANE_SWAP_CTRL 0x000000ac + +#define REG_DSI_ERR_INT_MASK0 0x00000108 + +#define REG_DSI_INTR_CTRL 0x0000010c + +#define REG_DSI_RESET 0x00000114 + +#define REG_DSI_CLK_CTRL 0x00000118 + +#define REG_DSI_PHY_RESET 0x00000128 + +#define REG_DSI_PHY_PLL_CTRL_0 0x00000200 +#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001 + +#define REG_DSI_PHY_PLL_CTRL_1 0x00000204 + +#define REG_DSI_PHY_PLL_CTRL_2 0x00000208 + +#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c + +#define REG_DSI_PHY_PLL_CTRL_4 0x00000210 + +#define REG_DSI_PHY_PLL_CTRL_5 0x00000214 + +#define REG_DSI_PHY_PLL_CTRL_6 0x00000218 + +#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c + +#define REG_DSI_PHY_PLL_CTRL_8 0x00000220 + +#define REG_DSI_PHY_PLL_CTRL_9 0x00000224 + +#define REG_DSI_PHY_PLL_CTRL_10 0x00000228 + +#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c + +#define REG_DSI_PHY_PLL_CTRL_12 0x00000230 + +#define REG_DSI_PHY_PLL_CTRL_13 0x00000234 + +#define REG_DSI_PHY_PLL_CTRL_14 0x00000238 + +#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c + +#define REG_DSI_PHY_PLL_CTRL_16 0x00000240 + +#define REG_DSI_PHY_PLL_CTRL_17 0x00000244 + +#define REG_DSI_PHY_PLL_CTRL_18 0x00000248 + +#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c + +#define REG_DSI_PHY_PLL_CTRL_20 0x00000250 + +#define REG_DSI_PHY_PLL_STATUS 0x00000280 +#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001 + +#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258 + +#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c + +#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c + +#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c + +#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288 + +#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c + +#define REG_DSI_8x60_PHY_CTRL_0 0x00000290 + +#define REG_DSI_8x60_PHY_CTRL_1 0x00000294 + +#define REG_DSI_8x60_PHY_CTRL_2 0x00000298 + +#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c + +#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0 + +#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4 + +#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8 + +#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac + +#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc + +#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0 + +#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4 + +#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8 + +#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc + +#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0 + +#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4 + +#define REG_DSI_8x60_PHY_CAL_STATUS 
0x000000fc +#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000 + +static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; } + +static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; } + +#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400 + +#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404 + +#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408 + +#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c + +#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414 + +#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418 + +#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440 + +#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444 + +#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448 + +#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c + +#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450 + +#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454 + +#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458 + +#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c + +#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460 + +#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464 + +#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468 + +#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c + +#define REG_DSI_8960_PHY_CTRL_0 0x00000470 + +#define REG_DSI_8960_PHY_CTRL_1 0x00000474 + +#define REG_DSI_8960_PHY_CTRL_2 0x00000478 + +#define REG_DSI_8960_PHY_CTRL_3 0x0000047c + +#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480 + +#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484 + +#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488 + +#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c + +#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490 + +#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494 + +#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498 + +#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c + +#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0 + +#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500 + +#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504 + +#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508 + +#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c + +#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510 + +#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518 + +#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528 + +#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c + +#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530 + +#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534 + +#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538 + +#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c + +#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540 + +#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544 + +#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548 + +#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550 +#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010 + + +#endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h new file mode 100644 index 000000000000..aefc1b8feae9 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -0,0 +1,114 @@ +#ifndef MMSS_CC_XML +#define MMSS_CC_XML + +/* Autogenerated file, 
DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum mmss_cc_clk { + CLK = 0, + PCLK = 1, +}; + +#define REG_MMSS_CC_AHB 0x00000008 + +static inline uint32_t __offset_CLK(enum mmss_cc_clk idx) +{ + switch (idx) { + case CLK: return 0x0000004c; + case PCLK: return 0x00000130; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); } + +static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); } +#define MMSS_CC_CLK_CC_CLK_EN 0x00000001 +#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004 +#define MMSS_CC_CLK_CC_MND_EN 0x00000020 +#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0 +#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6 +static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK; +} +#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300 +#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8 +static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK; +} + +static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); } +#define MMSS_CC_CLK_MD_D__MASK 0x000000ff +#define MMSS_CC_CLK_MD_D__SHIFT 0 +static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK; +} +#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00 +#define MMSS_CC_CLK_MD_M__SHIFT 8 +static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK; +} + +static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); } +#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f +#define MMSS_CC_CLK_NS_SRC__SHIFT 0 +static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK; +} +#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000 +#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12 +static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK; +} +#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000 +#define MMSS_CC_CLK_NS_VAL__SHIFT 24 +static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val) +{ + return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK; +} + + +#endif /* MMSS_CC_XML */ diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h new file mode 100644 index 000000000000..a225e8170b2a --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -0,0 +1,48 @@ +#ifndef SFPB_XML +#define SFPB_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +#define REG_SFPB_CFG 0x00000058 + + +#endif /* SFPB_XML */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h new file mode 100644 index 000000000000..f5fa4865e059 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -0,0 +1,508 @@ +#ifndef HDMI_XML +#define HDMI_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum hdmi_hdcp_key_state { + NO_KEYS = 0, + NOT_CHECKED = 1, + CHECKING = 2, + KEYS_VALID = 3, + AKSV_INVALID = 4, + CHECKSUM_MISMATCH = 5, +}; + +enum hdmi_ddc_read_write { + DDC_WRITE = 0, + DDC_READ = 1, +}; + +enum hdmi_acr_cts { + ACR_NONE = 0, + ACR_32 = 1, + ACR_44 = 2, + ACR_48 = 3, +}; + +#define REG_HDMI_CTRL 0x00000000 +#define HDMI_CTRL_ENABLE 0x00000001 +#define HDMI_CTRL_HDMI 0x00000002 +#define HDMI_CTRL_ENCRYPTED 0x00000004 + +#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020 +#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001 + +#define REG_HDMI_ACR_PKT_CTRL 0x00000024 +#define HDMI_ACR_PKT_CTRL_CONT 0x00000001 +#define HDMI_ACR_PKT_CTRL_SEND 0x00000002 +#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030 +#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4 +static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val) +{ + return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK; +} +#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100 +#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000 +#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16 +static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val) +{ + return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK; +} +#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000 + +#define REG_HDMI_VBI_PKT_CTRL 0x00000028 +#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010 +#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020 +#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100 +#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200 +#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000 +#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000 + +#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c +#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001 +#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002 +#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010 +#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020 +#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040 +#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080 + +#define REG_HDMI_GEN_PKT_CTRL 0x00000034 +#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001 +#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002 +#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c +#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2 +static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val) +{ + return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK; +} +#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010 +#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020 +#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000 +#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16 +static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val) +{ + return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK; +} +#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000 +#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24 +static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val) +{ + return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK; +} + +#define REG_HDMI_GC 0x00000040 +#define HDMI_GC_MUTE 0x00000001 + +#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044 +#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001 +#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002 + +static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; } + +#define REG_HDMI_GENERIC0_HDR 0x00000084 + +static inline uint32_t 
REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; } + +#define REG_HDMI_GENERIC1_HDR 0x000000a4 + +static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; } + +static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; } + +static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; } +#define HDMI_ACR_0_CTS__MASK 0xfffff000 +#define HDMI_ACR_0_CTS__SHIFT 12 +static inline uint32_t HDMI_ACR_0_CTS(uint32_t val) +{ + return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK; +} + +static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; } +#define HDMI_ACR_1_N__MASK 0xffffffff +#define HDMI_ACR_1_N__SHIFT 0 +static inline uint32_t HDMI_ACR_1_N(uint32_t val) +{ + return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK; +} + +#define REG_HDMI_AUDIO_INFO0 0x000000e4 +#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff +#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0 +static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val) +{ + return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK; +} +#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700 +#define HDMI_AUDIO_INFO0_CC__SHIFT 8 +static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val) +{ + return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK; +} + +#define REG_HDMI_AUDIO_INFO1 0x000000e8 +#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff +#define HDMI_AUDIO_INFO1_CA__SHIFT 0 +static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val) +{ + return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK; +} +#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800 +#define HDMI_AUDIO_INFO1_LSV__SHIFT 11 +static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val) +{ + return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK; +} +#define HDMI_AUDIO_INFO1_DM_INH 0x00008000 + +#define REG_HDMI_HDCP_CTRL 0x00000110 +#define HDMI_HDCP_CTRL_ENABLE 0x00000001 +#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100 + +#define REG_HDMI_HDCP_INT_CTRL 0x00000118 + +#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c +#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100 +#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200 +#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000 +#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28 +static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val) +{ + return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK; +} + +#define REG_HDMI_HDCP_RESET 0x00000130 +#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 + +#define REG_HDMI_AUDIO_CFG 0x000001d0 +#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 +#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 +#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4 +static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val) +{ + return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK; +} + +#define REG_HDMI_USEC_REFTIMER 0x00000208 + +#define REG_HDMI_DDC_CTRL 0x0000020c +#define HDMI_DDC_CTRL_GO 0x00000001 +#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002 +#define HDMI_DDC_CTRL_SEND_RESET 0x00000004 +#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008 +#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000 +#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20 +static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) +{ + return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; +} + 
+#define REG_HDMI_DDC_INT_CTRL 0x00000214 +#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 +#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 +#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004 + +#define REG_HDMI_DDC_SW_STATUS 0x00000218 +#define HDMI_DDC_SW_STATUS_NACK0 0x00001000 +#define HDMI_DDC_SW_STATUS_NACK1 0x00002000 +#define HDMI_DDC_SW_STATUS_NACK2 0x00004000 +#define HDMI_DDC_SW_STATUS_NACK3 0x00008000 + +#define REG_HDMI_DDC_HW_STATUS 0x0000021c + +#define REG_HDMI_DDC_SPEED 0x00000220 +#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003 +#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0 +static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val) +{ + return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK; +} +#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000 +#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16 +static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val) +{ + return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK; +} + +#define REG_HDMI_DDC_SETUP 0x00000224 +#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000 +#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24 +static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val) +{ + return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK; +} + +static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; } + +static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; } +#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001 +#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0 +static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val) +{ + return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK; +} +#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100 +#define HDMI_I2C_TRANSACTION_REG_START 0x00001000 +#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000 +#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000 +#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16 +static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val) +{ + return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK; +} + +#define REG_HDMI_DDC_DATA 0x00000238 +#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001 +#define HDMI_DDC_DATA_DATA_RW__SHIFT 0 +static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val) +{ + return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK; +} +#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00 +#define HDMI_DDC_DATA_DATA__SHIFT 8 +static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val) +{ + return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK; +} +#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000 +#define HDMI_DDC_DATA_INDEX__SHIFT 16 +static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val) +{ + return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK; +} +#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000 + +#define REG_HDMI_HPD_INT_STATUS 0x00000250 +#define HDMI_HPD_INT_STATUS_INT 0x00000001 +#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002 + +#define REG_HDMI_HPD_INT_CTRL 0x00000254 +#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001 +#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002 +#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004 +#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010 +#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020 +#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200 + +#define REG_HDMI_HPD_CTRL 0x00000258 +#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff +#define 
HDMI_HPD_CTRL_TIMEOUT__SHIFT 0 +static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val) +{ + return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK; +} +#define HDMI_HPD_CTRL_ENABLE 0x10000000 + +#define REG_HDMI_DDC_REF 0x0000027c +#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000 +#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff +#define HDMI_DDC_REF_REFTIMER__SHIFT 0 +static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) +{ + return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; +} + +#define REG_HDMI_ACTIVE_HSYNC 0x000002b4 +#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff +#define HDMI_ACTIVE_HSYNC_START__SHIFT 0 +static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK; +} +#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000 +#define HDMI_ACTIVE_HSYNC_END__SHIFT 16 +static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK; +} + +#define REG_HDMI_ACTIVE_VSYNC 0x000002b8 +#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff +#define HDMI_ACTIVE_VSYNC_START__SHIFT 0 +static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK; +} +#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000 +#define HDMI_ACTIVE_VSYNC_END__SHIFT 16 +static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK; +} + +#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc +#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff +#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0 +static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val) +{ + return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK; +} +#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000 +#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16 +static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val) +{ + return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK; +} + +#define REG_HDMI_TOTAL 0x000002c0 +#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff +#define HDMI_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK; +} +#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000 +#define HDMI_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK; +} + +#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4 +#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff +#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0 +static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) +{ + return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK; +} + +#define REG_HDMI_FRAME_CTRL 0x000002c8 +#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000 +#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000 +#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 +#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 + +#define REG_HDMI_PHY_CTRL 0x000002d4 +#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 +#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 +#define HDMI_PHY_CTRL_SW_RESET 0x00000004 +#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 + +#define REG_HDMI_AUD_INT 0x000002cc +#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 +#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 +#define 
HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 +#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 + +#define REG_HDMI_8x60_PHY_REG0 0x00000300 +#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c +#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2 +static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) +{ + return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK; +} + +#define REG_HDMI_8x60_PHY_REG1 0x00000304 +#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0 +#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4 +static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val) +{ + return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK; +} +#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f +#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0 +static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) +{ + return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK; +} + +#define REG_HDMI_8x60_PHY_REG2 0x00000308 +#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001 +#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002 +#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004 +#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008 +#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010 +#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020 +#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040 +#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080 + +#define REG_HDMI_8x60_PHY_REG3 0x0000030c +#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001 + +#define REG_HDMI_8x60_PHY_REG4 0x00000310 + +#define REG_HDMI_8x60_PHY_REG5 0x00000314 + +#define REG_HDMI_8x60_PHY_REG6 0x00000318 + +#define REG_HDMI_8x60_PHY_REG7 0x0000031c + +#define REG_HDMI_8x60_PHY_REG8 0x00000320 + +#define REG_HDMI_8x60_PHY_REG9 0x00000324 + +#define REG_HDMI_8x60_PHY_REG10 0x00000328 + +#define REG_HDMI_8x60_PHY_REG11 0x0000032c + +#define REG_HDMI_8x60_PHY_REG12 0x00000330 +#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001 +#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002 +#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010 + +#define REG_HDMI_8960_PHY_REG0 0x00000400 + +#define REG_HDMI_8960_PHY_REG1 0x00000404 + +#define REG_HDMI_8960_PHY_REG2 0x00000408 + +#define REG_HDMI_8960_PHY_REG3 0x0000040c + +#define REG_HDMI_8960_PHY_REG4 0x00000410 + +#define REG_HDMI_8960_PHY_REG5 0x00000414 + +#define REG_HDMI_8960_PHY_REG6 0x00000418 + +#define REG_HDMI_8960_PHY_REG7 0x0000041c + +#define REG_HDMI_8960_PHY_REG8 0x00000420 + +#define REG_HDMI_8960_PHY_REG9 0x00000424 + +#define REG_HDMI_8960_PHY_REG10 0x00000428 + +#define REG_HDMI_8960_PHY_REG11 0x0000042c + +#define REG_HDMI_8960_PHY_REG12 0x00000430 + + +#endif /* HDMI_XML */ diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h new file mode 100644 index 000000000000..bee36363bcd0 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -0,0 +1,50 @@ +#ifndef QFPROM_XML +#define QFPROM_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238 +#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000 +#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000 + + +#endif /* QFPROM_XML */ diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h new file mode 100644 index 000000000000..bbeeebe2db55 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h @@ -0,0 +1,1061 @@ +#ifndef MDP4_XML +#define MDP4_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum mpd4_bpc { + BPC1 = 0, + BPC5 = 1, + BPC6 = 2, + BPC8 = 3, +}; + +enum mpd4_bpc_alpha { + BPC1A = 0, + BPC4A = 1, + BPC6A = 2, + BPC8A = 3, +}; + +enum mpd4_alpha_type { + FG_CONST = 0, + BG_CONST = 1, + FG_PIXEL = 2, + BG_PIXEL = 3, +}; + +enum mpd4_pipe { + VG1 = 0, + VG2 = 1, + RGB1 = 2, + RGB2 = 3, + RGB3 = 4, + VG3 = 5, + VG4 = 6, +}; + +enum mpd4_mixer { + MIXER0 = 0, + MIXER1 = 1, + MIXER2 = 2, +}; + +enum mpd4_mixer_stage_id { + STAGE_UNUSED = 0, + STAGE_BASE = 1, + STAGE0 = 2, + STAGE1 = 3, + STAGE2 = 4, + STAGE3 = 5, +}; + +enum mdp4_intf { + INTF_LCDC_DTV = 0, + INTF_DSI_VIDEO = 1, + INTF_DSI_CMD = 2, + INTF_EBI2_TV = 3, +}; + +enum mdp4_cursor_format { + CURSOR_ARGB = 1, + CURSOR_XRGB = 2, +}; + +enum mdp4_dma { + DMA_P = 0, + DMA_S = 1, + DMA_E = 2, +}; + +#define MDP4_IRQ_OVERLAY0_DONE 0x00000001 +#define MDP4_IRQ_OVERLAY1_DONE 0x00000002 +#define MDP4_IRQ_DMA_S_DONE 0x00000004 +#define MDP4_IRQ_DMA_E_DONE 0x00000008 +#define MDP4_IRQ_DMA_P_DONE 0x00000010 +#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020 +#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040 +#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080 +#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100 +#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200 +#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400 +#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800 +#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000 +#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000 +#define MDP4_IRQ_OVERLAY2_DONE 0x40000000 +#define REG_MDP4_VERSION 0x00000000 +#define MDP4_VERSION_MINOR__MASK 0x00ff0000 +#define MDP4_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP4_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK; +} +#define MDP4_VERSION_MAJOR__MASK 0xff000000 +#define MDP4_VERSION_MAJOR__SHIFT 24 +static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK; +} + +#define REG_MDP4_OVLP0_KICK 0x00000004 + +#define REG_MDP4_OVLP1_KICK 0x00000008 + +#define REG_MDP4_OVLP2_KICK 0x000000d0 + +#define REG_MDP4_DMA_P_KICK 0x0000000c + +#define REG_MDP4_DMA_S_KICK 0x00000010 + +#define REG_MDP4_DMA_E_KICK 0x00000014 + +#define REG_MDP4_DISP_STATUS 0x00000018 + +#define REG_MDP4_DISP_INTF_SEL 0x00000038 +#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003 +#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0 +static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK; +} +#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c +#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2 +static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK; +} +#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030 +#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4 +static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK; +} +#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040 +#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080 + +#define REG_MDP4_RESET_STATUS 0x0000003c + +#define REG_MDP4_READ_CNFG 0x0000004c + +#define REG_MDP4_INTR_ENABLE 0x00000050 + +#define REG_MDP4_INTR_STATUS 0x00000054 + +#define REG_MDP4_INTR_CLEAR 0x00000058 + +#define REG_MDP4_EBI2_LCD0 0x00000060 + +#define REG_MDP4_EBI2_LCD1 0x00000064 + +#define REG_MDP4_PORTMAP_MODE 0x00000070 + +#define REG_MDP4_CS_CONTROLLER0 0x000000c0 + +#define REG_MDP4_CS_CONTROLLER1 0x000000c4 + +#define 
REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc + +#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; 
+} +#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_VG2_SRC_FORMAT 0x00030050 + +#define REG_MDP4_VG2_CONST_COLOR 0x00031008 + +#define REG_MDP4_OVERLAY_FLUSH 0x00018000 +#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001 +#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002 +#define MDP4_OVERLAY_FLUSH_VG1 0x00000004 +#define MDP4_OVERLAY_FLUSH_VG2 0x00000008 +#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010 +#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020 + +static inline uint32_t __offset_OVLP(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00010000; + case 1: return 0x00018000; + case 2: return 0x00088000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); } +#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK; +} +#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + 
__offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); } + +static inline uint32_t __offset_STAGE(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000104; + case 1: return 0x00000124; + case 2: return 0x00000144; + case 3: return 0x00000160; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 +static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004 +#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 +static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040 +#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080 +#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100 +#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200 + +static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t __offset_STAGE_CO3(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00001004; + case 1: return 0x00001404; + case 2: return 0x00001804; + case 3: return 0x00001b84; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } +#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001 + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) 
{ return 0x00000200 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); } + + +static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +#define REG_MDP4_DMA_P_OP_MODE 0x00090070 + +static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; } + +static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +#define REG_MDP4_DMA_S_OP_MODE 0x000a0028 + +static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; } + +static inline uint32_t __offset_DMA(enum mdp4_dma idx) +{ + switch (idx) { + case DMA_P: return 0x00090000; + case DMA_S: return 0x000a0000; + case DMA_E: return 0x000b0000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } +#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 +#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 +static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; +} +#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c +#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 +static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; +} +#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 +#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 +static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; +} +#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080 +#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00 +#define MDP4_DMA_CONFIG_PACK__SHIFT 8 +static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val) +{ + return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK; +} +#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000 +#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000 + +static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + 
__offset_DMA(i0); } +#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); } +#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f +#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK; +} +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000 +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff +#define MDP4_DMA_CURSOR_POS_X__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK; +} +#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000 +#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1 +static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val) +{ + return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK; +} +#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008 + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + 
__offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); } + + +static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } +#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; } +#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK; +} +#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_SRC_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; } +#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; } +#define 
MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_DST_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK; +} +#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_DST_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; } +#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; } +#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 +#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c +#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 +#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 
0x000000c0 +#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 +#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 +#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 + +static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; } +#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff +#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 +#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 +#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 +#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; } +#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 +#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 +#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 +#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 +#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 +#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000 +#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000 +#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000 +#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000 +#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 +#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 + +static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; } + + +static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { 
return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +#define REG_MDP4_LCDC 0x000c0000 + +#define REG_MDP4_LCDC_ENABLE 0x000c0000 + +#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004 +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008 + +#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c + +#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010 +#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014 + +#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018 + +#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c +#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020 + +#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024 + +#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028 + +#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << 
MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030 + +#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034 + +#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038 +#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_DTV 0x000d0000 + +#define REG_MDP4_DTV_ENABLE 0x000d0000 + +#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004 +#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008 + +#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c + +#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018 +#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c + +#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020 + +#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c +#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030 + +#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038 + +#define REG_MDP4_DTV_BORDER_CLR 0x000d0040 + +#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044 +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048 + +#define REG_MDP4_DTV_TEST_CNTL 0x000d004c + +#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050 +#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_DSI 0x000e0000 + +#define REG_MDP4_DSI_ENABLE 0x000e0000 + +#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004 +#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t 
MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008 + +#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c + +#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010 +#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014 + +#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018 + +#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c +#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020 + +#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024 + +#define REG_MDP4_DSI_BORDER_CLR 0x000e0028 + +#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030 + +#define REG_MDP4_DSI_TEST_CNTL 0x000e0034 + +#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038 +#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + + +#endif /* MDP4_XML */ -- cgit v1.2.3 From c8afe684c95cd17cf4f273d81af369a0fdfa5a74 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 26 Jun 2013 12:44:06 -0400 Subject: drm/msm: basic KMS driver for snapdragon The snapdragon chips have multiple different display controllers, depending on which chip variant/version. (As far as I can tell, current devices have either MDP3 or MDP4, and upcoming devices have MDSS.) And then external to the display controller are HDMI, DSI, etc. blocks which may be shared across devices which have different display controller blocks. To more easily add support for different display controller blocks, the display controller specific bits are split out into a "kms" module, which provides the kms plane/crtc/encoder objects. The external HDMI, DSI, etc. blocks are part encoder, and part connector currently. 
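To make that split concrete, below is a minimal sketch of what such a "kms" abstraction can look like in C: a small function table that each display controller implementation fills in, while the rest of the driver only talks to the base object. The struct layout and function names here are illustrative assumptions for this write-up, not the actual interface added by the patch.

/* Illustrative sketch only -- the names and fields are assumptions: */
struct msm_kms;
struct drm_crtc;

struct msm_kms_funcs {
	/* bring up the display controller hardware: */
	int (*hw_init)(struct msm_kms *kms);
	/* vblank/framedone irq handling stays with the controller: */
	void (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*destroy)(struct msm_kms *kms);
};

struct msm_kms {
	const struct msm_kms_funcs *funcs;
};

/* an MDP4 implementation would then embed the base object: */
struct mdp4_kms {
	struct msm_kms base;
	void *mmio;		/* mapped register block */
	/* clocks, irq bookkeeping, ... */
};

Supporting a different display controller (e.g. MDSS) then amounts to adding another implementation of this table rather than another drm driver.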
But I think I will pull in the drm_bridge patches from chromeos tree, and split them into a bridge+connector, with the registers that need to be set in modeset handled by the bridge. This would remove the 'msm_connector' base class. But some things need to be double checked to make sure I could get the correct ON/OFF sequencing.. This patch adds support for mdp4 crtc (including hw cursor), dtv encoder (part of MDP4 block), and hdmi. Signed-off-by: Rob Clark --- drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/msm/Kconfig | 34 ++ drivers/gpu/drm/msm/Makefile | 25 + drivers/gpu/drm/msm/NOTES | 69 +++ drivers/gpu/drm/msm/hdmi/hdmi.c | 235 ++++++++++ drivers/gpu/drm/msm/hdmi/hdmi.h | 112 +++++ drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 461 +++++++++++++++++++ drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 281 ++++++++++++ drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 141 ++++++ drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 214 +++++++++ drivers/gpu/drm/msm/mdp4/mdp4_crtc.c | 684 ++++++++++++++++++++++++++++ drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c | 317 +++++++++++++ drivers/gpu/drm/msm/mdp4/mdp4_format.c | 56 +++ drivers/gpu/drm/msm/mdp4/mdp4_irq.c | 203 +++++++++ drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 368 +++++++++++++++ drivers/gpu/drm/msm/mdp4/mdp4_kms.h | 194 ++++++++ drivers/gpu/drm/msm/mdp4/mdp4_plane.c | 243 ++++++++++ drivers/gpu/drm/msm/msm_connector.c | 34 ++ drivers/gpu/drm/msm/msm_connector.h | 68 +++ drivers/gpu/drm/msm/msm_drv.c | 532 ++++++++++++++++++++++ drivers/gpu/drm/msm/msm_drv.h | 187 ++++++++ drivers/gpu/drm/msm/msm_fb.c | 202 ++++++++ drivers/gpu/drm/msm/msm_fbdev.c | 258 +++++++++++ drivers/gpu/drm/msm/msm_gem.c | 521 +++++++++++++++++++++ drivers/gpu/drm/msm/msm_gem.h | 41 ++ 26 files changed, 5483 insertions(+) create mode 100644 drivers/gpu/drm/msm/Kconfig create mode 100644 drivers/gpu/drm/msm/Makefile create mode 100644 drivers/gpu/drm/msm/NOTES create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.h create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_connector.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_crtc.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_format.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_irq.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_kms.c create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_kms.h create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_plane.c create mode 100644 drivers/gpu/drm/msm/msm_connector.c create mode 100644 drivers/gpu/drm/msm/msm_connector.h create mode 100644 drivers/gpu/drm/msm/msm_drv.c create mode 100644 drivers/gpu/drm/msm/msm_drv.h create mode 100644 drivers/gpu/drm/msm/msm_fb.c create mode 100644 drivers/gpu/drm/msm/msm_fbdev.c create mode 100644 drivers/gpu/drm/msm/msm_gem.c create mode 100644 drivers/gpu/drm/msm/msm_gem.h diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 626bc0cb1046..39573c5f7518 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -223,3 +223,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig" source "drivers/gpu/drm/tilcdc/Kconfig" source "drivers/gpu/drm/qxl/Kconfig" + +source "drivers/gpu/drm/msm/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 7b2343a2f5eb..f089adfe70ee 100644 --- a/drivers/gpu/drm/Makefile +++ 
b/drivers/gpu/drm/Makefile @@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ obj-$(CONFIG_DRM_OMAP) += omapdrm/ obj-$(CONFIG_DRM_TILCDC) += tilcdc/ obj-$(CONFIG_DRM_QXL) += qxl/ +obj-$(CONFIG_DRM_MSM) += msm/ obj-y += i2c/ diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig new file mode 100644 index 000000000000..a06c19cc56f8 --- /dev/null +++ b/drivers/gpu/drm/msm/Kconfig @@ -0,0 +1,34 @@ + +config DRM_MSM + tristate "MSM DRM" + depends on DRM + depends on ARCH_MSM + depends on ARCH_MSM8960 + select DRM_KMS_HELPER + select SHMEM + select TMPFS + default y + help + DRM/KMS driver for MSM/snapdragon. + +config DRM_MSM_FBDEV + bool "Enable legacy fbdev support for MSM modesetting driver" + depends on DRM_MSM + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS + default y + help + Choose this option if you have a need for the legacy fbdev + support. Note that this support also provide the linux console + support on top of the MSM modesetting driver. + +config DRM_MSM_REGISTER_LOGGING + bool "MSM DRM register logging" + depends on DRM_MSM + default n + help + Compile in support for logging register reads/writes in a format + that can be parsed by envytools demsm tool. If enabled, register + logging can be switched on via msm.reglog=y module param. diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile new file mode 100644 index 000000000000..4068122a9377 --- /dev/null +++ b/drivers/gpu/drm/msm/Makefile @@ -0,0 +1,25 @@ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm +ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) + ccflags-y += -Werror +endif + +msm-y := \ + hdmi/hdmi.o \ + hdmi/hdmi_connector.o \ + hdmi/hdmi_i2c.o \ + hdmi/hdmi_phy_8960.o \ + hdmi/hdmi_phy_8x60.o \ + mdp4/mdp4_crtc.o \ + mdp4/mdp4_dtv_encoder.o \ + mdp4/mdp4_format.o \ + mdp4/mdp4_irq.o \ + mdp4/mdp4_kms.o \ + mdp4/mdp4_plane.o \ + msm_connector.o \ + msm_drv.o \ + msm_fb.o \ + msm_gem.o + +msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o + +obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES new file mode 100644 index 000000000000..e036f6c1db94 --- /dev/null +++ b/drivers/gpu/drm/msm/NOTES @@ -0,0 +1,69 @@ +NOTES about msm drm/kms driver: + +In the current snapdragon SoC's, we have (at least) 3 different +display controller blocks at play: + + MDP3 - ?? seems to be what is on geeksphone peak device + + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) + + MDSS - snapdragon 800 + +(I don't have a completely clear picture on which display controller +maps to which part #) + +Plus a handful of blocks around them for HDMI/DSI/etc output. + +And on gpu side of things: + + zero, one, or two 2d cores (z180) + + and either a2xx or a3xx 3d core. + +But, HDMI/DSI/etc blocks seem like they can be shared across multiple +display controller blocks. And I for sure don't want to have to deal +with N different kms devices from xf86-video-freedreno. Plus, it +seems like we can do some clever tricks like use GPU to trigger +pageflip after rendering completes (ie. have the kms/crtc code build +up gpu cmdstream to update scanout and write FLUSH register after). + +So, the approach is one drm driver, with some modularity. Different +'struct msm_kms' implementations, depending on display controller. +And one or more 'struct msm_gpu' for the various different gpu sub- +modules. + +(Second part is not implemented yet. 
So far this is just basic KMS +driver, and not exposing any custom ioctls to userspace for now.) + +The kms module provides the plane, crtc, and encoder objects, and +loads whatever connectors are appropriate. + +For MDP4, the mapping is: + + plane -> PIPE{RGBn,VGn} \ + crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device" + encoder -> DTV/LCDC/DSI (within MDP4) / + connector -> HDMI/DSI/etc --> other device(s) + +Since the irq's that drm core mostly cares about are vblank/framedone, +we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions +and treat the MDP4 block's irq as "the" irq. Even though the connectors +may have their own irqs which they install themselves. For this reason +the display controller is the "master" device. + +Each connector probably ends up being a separate device, just for the +logistics of finding/mapping io region, irq, etc. Idealy we would +have a better way than just stashing the platform device in a global +(ie. like DT super-node.. but I don't have any snapdragon hw yet that +is using DT). + +Note that so far I've not been able to get any docs on the hw, and it +seems that access to such docs would prevent me from working on the +freedreno gallium driver. So there may be some mistakes in register +names (I had to invent a few, since no sufficient hint was given in +the downstream android fbdev driver), bitfield sizes, etc. My current +state of understanding the registers is given in the envytools rnndb +files at: + + https://github.com/freedreno/envytools/tree/master/rnndb + (the mdp4/hdmi/dsi directories) + +These files are used both for a parser tool (in the same tree) to +parse logged register reads/writes (both from downstream android fbdev +driver, and this driver with register logging enabled), as well as to +generate the register level headers. diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c new file mode 100644 index 000000000000..12ecfb928f75 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "hdmi.h" + +static struct platform_device *hdmi_pdev; + +void hdmi_set_mode(struct hdmi *hdmi, bool power_on) +{ + uint32_t ctrl = 0; + + if (power_on) { + ctrl |= HDMI_CTRL_ENABLE; + if (!hdmi->hdmi_mode) { + ctrl |= HDMI_CTRL_HDMI; + hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); + ctrl &= ~HDMI_CTRL_HDMI; + } else { + ctrl |= HDMI_CTRL_HDMI; + } + } else { + ctrl = HDMI_CTRL_HDMI; + } + + hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); + DBG("HDMI Core: %s, HDMI_CTRL=0x%08x", + power_on ? "Enable" : "Disable", ctrl); +} + +static irqreturn_t hdmi_irq(int irq, void *dev_id) +{ + struct hdmi *hdmi = dev_id; + + /* Process HPD: */ + hdmi_connector_irq(hdmi->connector); + + /* Process DDC: */ + hdmi_i2c_irq(hdmi->i2c); + + /* TODO audio.. 
*/ + + return IRQ_HANDLED; +} + +void hdmi_destroy(struct hdmi *hdmi) +{ + struct hdmi_phy *phy = hdmi->phy; + + if (phy) + phy->funcs->destroy(phy); + + if (hdmi->i2c) + hdmi_i2c_destroy(hdmi->i2c); + + put_device(&hdmi->pdev->dev); +} + +/* initialize connector */ +int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, + struct drm_connector *connector) +{ + struct platform_device *pdev = hdmi_pdev; + struct hdmi_platform_config *config; + int ret; + + if (!pdev) { + dev_err(dev->dev, "no hdmi device\n"); + ret = -ENXIO; + goto fail; + } + + config = pdev->dev.platform_data; + + get_device(&pdev->dev); + + hdmi->dev = dev; + hdmi->pdev = pdev; + hdmi->connector = connector; + + /* not sure about which phy maps to which msm.. probably I miss some */ + if (config->phy_init) + hdmi->phy = config->phy_init(hdmi); + else + hdmi->phy = ERR_PTR(-ENXIO); + + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + dev_err(dev->dev, "failed to load phy: %d\n", ret); + hdmi->phy = NULL; + goto fail; + } + + hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI"); + if (IS_ERR(hdmi->mmio)) { + ret = PTR_ERR(hdmi->mmio); + goto fail; + } + + hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs"); + if (IS_ERR(hdmi->mvs)) + hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs"); + if (IS_ERR(hdmi->mvs)) { + ret = PTR_ERR(hdmi->mvs); + dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret); + goto fail; + } + + hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0"); + if (IS_ERR(hdmi->mpp0)) + hdmi->mpp0 = NULL; + + hdmi->clk = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(hdmi->clk)) { + ret = PTR_ERR(hdmi->clk); + dev_err(dev->dev, "failed to get 'clk': %d\n", ret); + goto fail; + } + + hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk"); + if (IS_ERR(hdmi->m_pclk)) { + ret = PTR_ERR(hdmi->m_pclk); + dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret); + goto fail; + } + + hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk"); + if (IS_ERR(hdmi->s_pclk)) { + ret = PTR_ERR(hdmi->s_pclk); + dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret); + goto fail; + } + + hdmi->i2c = hdmi_i2c_init(hdmi); + if (IS_ERR(hdmi->i2c)) { + ret = PTR_ERR(hdmi->i2c); + dev_err(dev->dev, "failed to get i2c: %d\n", ret); + hdmi->i2c = NULL; + goto fail; + } + + hdmi->irq = platform_get_irq(pdev, 0); + if (hdmi->irq < 0) { + ret = hdmi->irq; + dev_err(dev->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, + NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "hdmi_isr", hdmi); + if (ret < 0) { + dev_err(dev->dev, "failed to request IRQ%u: %d\n", + hdmi->irq, ret); + goto fail; + } + + return 0; + +fail: + if (hdmi) + hdmi_destroy(hdmi); + + return ret; +} + +/* + * The hdmi device: + */ + +static int hdmi_dev_probe(struct platform_device *pdev) +{ + static struct hdmi_platform_config config = {}; +#ifdef CONFIG_OF + /* TODO */ +#else + if (cpu_is_apq8064()) { + config.phy_init = hdmi_phy_8960_init; + config.ddc_clk_gpio = 70; + config.ddc_data_gpio = 71; + config.hpd_gpio = 72; + config.pmic_gpio = 13 + NR_GPIO_IRQS; + } else if (cpu_is_msm8960()) { + config.phy_init = hdmi_phy_8960_init; + config.ddc_clk_gpio = 100; + config.ddc_data_gpio = 101; + config.hpd_gpio = 102; + config.pmic_gpio = -1; + } else if (cpu_is_msm8x60()) { + config.phy_init = hdmi_phy_8x60_init; + config.ddc_clk_gpio = 170; + config.ddc_data_gpio = 171; + config.hpd_gpio = 172; + config.pmic_gpio = -1; + } +#endif + pdev->dev.platform_data = 
&config; + hdmi_pdev = pdev; + return 0; +} + +static int hdmi_dev_remove(struct platform_device *pdev) +{ + hdmi_pdev = NULL; + return 0; +} + +static struct platform_driver hdmi_driver = { + .probe = hdmi_dev_probe, + .remove = hdmi_dev_remove, + .driver.name = "hdmi_msm", +}; + +void __init hdmi_register(void) +{ + platform_driver_register(&hdmi_driver); +} + +void __exit hdmi_unregister(void) +{ + platform_driver_unregister(&hdmi_driver); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h new file mode 100644 index 000000000000..34703fea22ca --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __HDMI_CONNECTOR_H__ +#define __HDMI_CONNECTOR_H__ + +#include +#include +#include +#include + +#include "msm_drv.h" +#include "hdmi.xml.h" + + +struct hdmi_phy; + +struct hdmi { + struct drm_device *dev; + struct platform_device *pdev; + + void __iomem *mmio; + + struct regulator *mvs; /* HDMI_5V */ + struct regulator *mpp0; /* External 5V */ + + struct clk *clk; + struct clk *m_pclk; + struct clk *s_pclk; + + struct hdmi_phy *phy; + struct i2c_adapter *i2c; + struct drm_connector *connector; + + bool hdmi_mode; /* are we in hdmi mode? */ + + int irq; +}; + +/* platform config data (ie. 
from DT, or pdata) */ +struct hdmi_platform_config { + struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); + int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio; +}; + +void hdmi_set_mode(struct hdmi *hdmi, bool power_on); +void hdmi_destroy(struct hdmi *hdmi); +int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, + struct drm_connector *connector); + +static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) +{ + msm_writel(data, hdmi->mmio + reg); +} + +static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) +{ + return msm_readl(hdmi->mmio + reg); +} + +/* + * The phy appears to be different, for example between 8960 and 8x60, + * so split the phy related functions out and load the correct one at + * runtime: + */ + +struct hdmi_phy_funcs { + void (*destroy)(struct hdmi_phy *phy); + void (*reset)(struct hdmi_phy *phy); + void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock); + void (*powerdown)(struct hdmi_phy *phy); +}; + +struct hdmi_phy { + const struct hdmi_phy_funcs *funcs; +}; + +/* + * phy can be different on different generations: + */ +struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); +struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); + +/* + * hdmi connector: + */ + +void hdmi_connector_irq(struct drm_connector *connector); + +/* + * i2c adapter for ddc: + */ + +void hdmi_i2c_irq(struct i2c_adapter *i2c); +void hdmi_i2c_destroy(struct i2c_adapter *i2c); +struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi); + +#endif /* __HDMI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c new file mode 100644 index 000000000000..7d63f5ffa7ba --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -0,0 +1,461 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include + +#include "msm_connector.h" +#include "hdmi.h" + +struct hdmi_connector { + struct msm_connector base; + struct hdmi hdmi; + unsigned long int pixclock; + bool enabled; +}; +#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) + +static int gpio_config(struct hdmi *hdmi, bool on) +{ + struct drm_device *dev = hdmi->dev; + struct hdmi_platform_config *config = + hdmi->pdev->dev.platform_data; + int ret; + + if (on) { + ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); + if (ret) { + dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); + goto error1; + } + ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); + if (ret) { + dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_DATA", config->ddc_data_gpio, ret); + goto error2; + } + ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); + if (ret) { + dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_HPD", config->hpd_gpio, ret); + goto error3; + } + if (config->pmic_gpio != -1) { + ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL"); + if (ret) { + dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", + "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret); + goto error4; + } + gpio_set_value_cansleep(config->pmic_gpio, 0); + } + DBG("gpio on"); + } else { + gpio_free(config->ddc_clk_gpio); + gpio_free(config->ddc_data_gpio); + gpio_free(config->hpd_gpio); + + if (config->pmic_gpio != -1) { + gpio_set_value_cansleep(config->pmic_gpio, 1); + gpio_free(config->pmic_gpio); + } + DBG("gpio off"); + } + + return 0; + +error4: + gpio_free(config->hpd_gpio); +error3: + gpio_free(config->ddc_data_gpio); +error2: + gpio_free(config->ddc_clk_gpio); +error1: + return ret; +} + +static int hpd_enable(struct hdmi_connector *hdmi_connector) +{ + struct hdmi *hdmi = &hdmi_connector->hdmi; + struct drm_device *dev = hdmi_connector->base.base.dev; + struct hdmi_phy *phy = hdmi->phy; + uint32_t hpd_ctrl; + int ret; + + ret = gpio_config(hdmi, true); + if (ret) { + dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); + goto fail; + } + + ret = clk_prepare_enable(hdmi->clk); + if (ret) { + dev_err(dev->dev, "failed to enable 'clk': %d\n", ret); + goto fail; + } + + ret = clk_prepare_enable(hdmi->m_pclk); + if (ret) { + dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret); + goto fail; + } + + ret = clk_prepare_enable(hdmi->s_pclk); + if (ret) { + dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret); + goto fail; + } + + if (hdmi->mpp0) + ret = regulator_enable(hdmi->mpp0); + if (!ret) + ret = regulator_enable(hdmi->mvs); + if (ret) { + dev_err(dev->dev, "failed to enable regulators: %d\n", ret); + goto fail; + } + + hdmi_set_mode(hdmi, false); + phy->funcs->reset(phy); + hdmi_set_mode(hdmi, true); + + hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); + + /* enable HPD events: */ + hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, + HDMI_HPD_INT_CTRL_INT_CONNECT | + HDMI_HPD_INT_CTRL_INT_EN); + + /* set timeout to 4.1ms (max) for hardware debounce */ + hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff); + + /* Toggle HPD circuit to trigger HPD sense */ + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, + ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl); + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, + HDMI_HPD_CTRL_ENABLE | hpd_ctrl); + + return 0; + +fail: + return ret; +} + +static int hdp_disable(struct hdmi_connector *hdmi_connector) +{ + struct hdmi *hdmi = &hdmi_connector->hdmi; + struct drm_device *dev = 
hdmi_connector->base.base.dev; + int ret = 0; + + /* Disable HPD interrupt */ + hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); + + hdmi_set_mode(hdmi, false); + + if (hdmi->mpp0) + ret = regulator_disable(hdmi->mpp0); + if (!ret) + ret = regulator_disable(hdmi->mvs); + if (ret) { + dev_err(dev->dev, "failed to enable regulators: %d\n", ret); + goto fail; + } + + clk_disable_unprepare(hdmi->clk); + clk_disable_unprepare(hdmi->m_pclk); + clk_disable_unprepare(hdmi->s_pclk); + + ret = gpio_config(hdmi, false); + if (ret) { + dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); + goto fail; + } + + return 0; + +fail: + return ret; +} + +void hdmi_connector_irq(struct drm_connector *connector) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi *hdmi = &hdmi_connector->hdmi; + uint32_t hpd_int_status, hpd_int_ctrl; + + /* Process HPD: */ + hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); + hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL); + + if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) && + (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) { + bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED); + + DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); + + /* ack the irq: */ + hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, + hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); + + drm_helper_hpd_irq_event(connector->dev); + + /* detect disconnect if we are connected or visa versa: */ + hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; + if (!detected) + hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; + hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); + } +} + +static enum drm_connector_status hdmi_connector_detect( + struct drm_connector *connector, bool force) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi *hdmi = &hdmi_connector->hdmi; + uint32_t hpd_int_status; + int retry = 20; + + hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); + + /* sense seems to in some cases be momentarily de-asserted, don't + * let that trick us into thinking the monitor is gone: + */ + while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { + mdelay(10); + hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); + DBG("status=%08x", hpd_int_status); + } + + return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ? 
+ connector_status_connected : connector_status_disconnected; +} + +static void hdmi_connector_destroy(struct drm_connector *connector) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + + hdp_disable(hdmi_connector); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + + hdmi_destroy(&hdmi_connector->hdmi); + + kfree(hdmi_connector); +} + +static int hdmi_connector_get_modes(struct drm_connector *connector) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi *hdmi = &hdmi_connector->hdmi; + struct edid *edid; + uint32_t hdmi_ctrl; + int ret = 0; + + hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); + + edid = drm_get_edid(connector, hdmi->i2c); + + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + + drm_mode_connector_update_edid_property(connector, edid); + + if (edid) { + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + return ret; +} + +static int hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + struct msm_drm_private *priv = connector->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, + requested, msm_connector->encoder); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return 0; +} + +static const struct drm_connector_funcs hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = hdmi_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = hdmi_connector_destroy, +}; + +static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { + .get_modes = hdmi_connector_get_modes, + .mode_valid = hdmi_connector_mode_valid, + .best_encoder = msm_connector_attached_encoder, +}; + +static void hdmi_connector_dpms(struct msm_connector *msm_connector, int mode) +{ + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi *hdmi = &hdmi_connector->hdmi; + struct hdmi_phy *phy = hdmi->phy; + bool enabled = (mode == DRM_MODE_DPMS_ON); + + DBG("mode=%d", mode); + + if (enabled == hdmi_connector->enabled) + return; + + if (enabled) { + phy->funcs->powerup(phy, hdmi_connector->pixclock); + hdmi_set_mode(hdmi, true); + } else { + hdmi_set_mode(hdmi, false); + phy->funcs->powerdown(phy); + } + + hdmi_connector->enabled = enabled; +} + +static void hdmi_connector_mode_set(struct msm_connector *msm_connector, + struct drm_display_mode *mode) +{ + struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi *hdmi = &hdmi_connector->hdmi; + int hstart, hend, vstart, vend; + uint32_t frame_ctrl; + + hdmi_connector->pixclock = mode->clock * 1000; + + hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; + + hstart = mode->htotal - mode->hsync_start; + hend = mode->htotal - mode->hsync_start + mode->hdisplay; + + vstart = mode->vtotal - mode->vsync_start - 1; + vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1; + + DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d", + mode->htotal, mode->vtotal, hstart, hend, vstart, vend); + + hdmi_write(hdmi, REG_HDMI_TOTAL, + 
HDMI_TOTAL_H_TOTAL(mode->htotal - 1) | + HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); + + hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC, + HDMI_ACTIVE_HSYNC_START(hstart) | + HDMI_ACTIVE_HSYNC_END(hend)); + hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC, + HDMI_ACTIVE_VSYNC_START(vstart) | + HDMI_ACTIVE_VSYNC_END(vend)); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, + HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal)); + hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, + HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) | + HDMI_VSYNC_ACTIVE_F2_END(vend + 1)); + } else { + hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, + HDMI_VSYNC_TOTAL_F2_V_TOTAL(0)); + hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, + HDMI_VSYNC_ACTIVE_F2_START(0) | + HDMI_VSYNC_ACTIVE_F2_END(0)); + } + + frame_ctrl = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN; + DBG("frame_ctrl=%08x", frame_ctrl); + hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); + + // TODO until we have audio, this might be safest: + if (hdmi->hdmi_mode) + hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE); +} + +static const struct msm_connector_funcs msm_connector_funcs = { + .dpms = hdmi_connector_dpms, + .mode_set = hdmi_connector_mode_set, +}; + +/* initialize connector */ +struct drm_connector *hdmi_connector_init(struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct drm_connector *connector = NULL; + struct hdmi_connector *hdmi_connector; + int ret; + + hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); + if (!hdmi_connector) { + ret = -ENOMEM; + goto fail; + } + + connector = &hdmi_connector->base.base; + + msm_connector_init(&hdmi_connector->base, + &msm_connector_funcs, encoder); + drm_connector_init(dev, connector, &hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); + + connector->polled = DRM_CONNECTOR_POLL_HPD; + + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; + + drm_sysfs_connector_add(connector); + + ret = hdmi_init(&hdmi_connector->hdmi, dev, connector); + if (ret) + goto fail; + + ret = hpd_enable(hdmi_connector); + if (ret) { + dev_err(dev->dev, "failed to enable HPD: %d\n", ret); + goto fail; + } + + drm_mode_connector_attach_encoder(connector, encoder); + + return connector; + +fail: + if (connector) + hdmi_connector_destroy(connector); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c new file mode 100644 index 000000000000..f4ab7f70fed1 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -0,0 +1,281 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "hdmi.h" + +struct hdmi_i2c_adapter { + struct i2c_adapter base; + struct hdmi *hdmi; + bool sw_done; + wait_queue_head_t ddc_event; +}; +#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base) + +static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c) +{ + struct hdmi *hdmi = hdmi_i2c->hdmi; + + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, + HDMI_DDC_CTRL_SW_STATUS_RESET); + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, + HDMI_DDC_CTRL_SOFT_RESET); + + hdmi_write(hdmi, REG_HDMI_DDC_SPEED, + HDMI_DDC_SPEED_THRESHOLD(2) | + HDMI_DDC_SPEED_PRESCALE(10)); + + hdmi_write(hdmi, REG_HDMI_DDC_SETUP, + HDMI_DDC_SETUP_TIMEOUT(0xff)); + + /* enable reference timer for 27us */ + hdmi_write(hdmi, REG_HDMI_DDC_REF, + HDMI_DDC_REF_REFTIMER_ENABLE | + HDMI_DDC_REF_REFTIMER(27)); +} + +static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c) +{ + struct hdmi *hdmi = hdmi_i2c->hdmi; + struct drm_device *dev = hdmi->dev; + uint32_t retry = 0xffff; + uint32_t ddc_int_ctrl; + + do { + --retry; + + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, + HDMI_DDC_INT_CTRL_SW_DONE_ACK | + HDMI_DDC_INT_CTRL_SW_DONE_MASK); + + ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL); + + } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry); + + if (!retry) { + dev_err(dev->dev, "timeout waiting for DDC\n"); + return -ETIMEDOUT; + } + + hdmi_i2c->sw_done = false; + + return 0; +} + +#define MAX_TRANSACTIONS 4 + +static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c) +{ + struct hdmi *hdmi = hdmi_i2c->hdmi; + + if (!hdmi_i2c->sw_done) { + uint32_t ddc_int_ctrl; + + ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL); + + if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) && + (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) { + hdmi_i2c->sw_done = true; + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, + HDMI_DDC_INT_CTRL_SW_DONE_ACK); + } + } + + return hdmi_i2c->sw_done; +} + +static int hdmi_i2c_xfer(struct i2c_adapter *i2c, + struct i2c_msg *msgs, int num) +{ + struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); + struct hdmi *hdmi = hdmi_i2c->hdmi; + struct drm_device *dev = hdmi->dev; + static const uint32_t nack[] = { + HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1, + HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3, + }; + int indices[MAX_TRANSACTIONS]; + int ret, i, j, index = 0; + uint32_t ddc_status, ddc_data, i2c_trans; + + num = min(num, MAX_TRANSACTIONS); + + WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE)); + + if (num == 0) + return num; + + init_ddc(hdmi_i2c); + + ret = ddc_clear_irq(hdmi_i2c); + if (ret) + return ret; + + for (i = 0; i < num; i++) { + struct i2c_msg *p = &msgs[i]; + uint32_t raw_addr = p->addr << 1; + + if (p->flags & I2C_M_RD) + raw_addr |= 1; + + ddc_data = HDMI_DDC_DATA_DATA(raw_addr) | + HDMI_DDC_DATA_DATA_RW(DDC_WRITE); + + if (i == 0) { + ddc_data |= HDMI_DDC_DATA_INDEX(0) | + HDMI_DDC_DATA_INDEX_WRITE; + } + + hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data); + index++; + + indices[i] = index; + + if (p->flags & I2C_M_RD) { + index += p->len; + } else { + for (j = 0; j < p->len; j++) { + ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) | + HDMI_DDC_DATA_DATA_RW(DDC_WRITE); + hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data); + index++; + } + } + + i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) | + HDMI_I2C_TRANSACTION_REG_RW( + (p->flags & I2C_M_RD) ? 
DDC_READ : DDC_WRITE) | + HDMI_I2C_TRANSACTION_REG_START; + + if (i == (num - 1)) + i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP; + + hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans); + } + + /* trigger the transfer: */ + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, + HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) | + HDMI_DDC_CTRL_GO); + + ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4); + if (ret <= 0) { + if (ret == 0) + ret = -ETIMEDOUT; + dev_warn(dev->dev, "DDC timeout: %d\n", ret); + DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x", + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS), + hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS), + hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL)); + return ret; + } + + ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS); + + /* read back results of any read transactions: */ + for (i = 0; i < num; i++) { + struct i2c_msg *p = &msgs[i]; + + if (!(p->flags & I2C_M_RD)) + continue; + + /* check for NACK: */ + if (ddc_status & nack[i]) { + DBG("ddc_status=%08x", ddc_status); + break; + } + + ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) | + HDMI_DDC_DATA_INDEX(indices[i]) | + HDMI_DDC_DATA_INDEX_WRITE; + + hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data); + + /* discard first byte: */ + hdmi_read(hdmi, REG_HDMI_DDC_DATA); + + for (j = 0; j < p->len; j++) { + ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA); + p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA); + } + } + + return i; +} + +static u32 hdmi_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm hdmi_i2c_algorithm = { + .master_xfer = hdmi_i2c_xfer, + .functionality = hdmi_i2c_func, +}; + +void hdmi_i2c_irq(struct i2c_adapter *i2c) +{ + struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); + + if (sw_done(hdmi_i2c)) + wake_up_all(&hdmi_i2c->ddc_event); +} + +void hdmi_i2c_destroy(struct i2c_adapter *i2c) +{ + struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); + i2c_del_adapter(i2c); + kfree(hdmi_i2c); +} + +struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi) +{ + struct drm_device *dev = hdmi->dev; + struct hdmi_i2c_adapter *hdmi_i2c; + struct i2c_adapter *i2c = NULL; + int ret; + + hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL); + if (!hdmi_i2c) { + ret = -ENOMEM; + goto fail; + } + + i2c = &hdmi_i2c->base; + + hdmi_i2c->hdmi = hdmi; + init_waitqueue_head(&hdmi_i2c->ddc_event); + + + i2c->owner = THIS_MODULE; + i2c->class = I2C_CLASS_DDC; + snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c"); + i2c->dev.parent = &hdmi->pdev->dev; + i2c->algo = &hdmi_i2c_algorithm; + + ret = i2c_add_adapter(i2c); + if (ret) { + dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret); + goto fail; + } + + return i2c; + +fail: + if (i2c) + hdmi_i2c_destroy(i2c); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c new file mode 100644 index 000000000000..e5b7ed5b8f01 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "hdmi.h" + +struct hdmi_phy_8960 { + struct hdmi_phy base; + struct hdmi *hdmi; +}; +#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) + +static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) +{ + struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); + kfree(phy_8960); +} + +static void hdmi_phy_8960_reset(struct hdmi_phy *phy) +{ + struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); + struct hdmi *hdmi = phy_8960->hdmi; + unsigned int val; + + val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } + + msleep(100); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET_PLL); + } +} + +static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, + unsigned long int pixclock) +{ + struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); + struct hdmi *hdmi = phy_8960->hdmi; + + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20); +} + +static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy) +{ + struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); + struct hdmi *hdmi = phy_8960->hdmi; + + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f); +} + +static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = { + .destroy = hdmi_phy_8960_destroy, + .reset = hdmi_phy_8960_reset, + .powerup = hdmi_phy_8960_powerup, + .powerdown = hdmi_phy_8960_powerdown, +}; + +struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) +{ + struct hdmi_phy_8960 *phy_8960; + struct hdmi_phy *phy = NULL; + int ret; + + phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); + if (!phy_8960) { + ret = -ENOMEM; + goto fail; + } + + phy = &phy_8960->base; + + phy->funcs = &hdmi_phy_8960_funcs; + + phy_8960->hdmi = hdmi; + + return phy; + +fail: + if (phy) + hdmi_phy_8960_destroy(phy); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c new file mode 100644 index 000000000000..391433c1af7c --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program 
is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "hdmi.h" + +struct hdmi_phy_8x60 { + struct hdmi_phy base; + struct hdmi *hdmi; +}; +#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base) + +static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy) +{ + struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); + kfree(phy_8x60); +} + +static void hdmi_phy_8x60_reset(struct hdmi_phy *phy) +{ + struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); + struct hdmi *hdmi = phy_8x60->hdmi; + unsigned int val; + + val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } + + msleep(100); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } +} + +static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, + unsigned long int pixclock) +{ + struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); + struct hdmi *hdmi = phy_8x60->hdmi; + + /* De-serializer delay D/C for non-lbk mode: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0, + HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3)); + + if (pixclock == 27000000) { + /* video_format == HDMI_VFRMT_720x480p60_16_9 */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3)); + } else { + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4)); + } + + /* No matter what, start from the power down mode: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); + + /* Turn PowerGen on: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); + + /* Turn PLL power on: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); + + /* Write to HIGH after PLL power down de-assert: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, + HDMI_8x60_PHY_REG3_PLL_ENABLE); + + /* ASIC power on; PHY REG9 = 0 */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); + + /* Enable PLL lock detect, PLL lock det will go high after lock + * Enable the re-time logic + */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN); + + /* Drivers are 
on: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DESER); + + /* If the RX detector is needed: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_DESER); + + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0); + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0); + + /* If we want to use lock enable based on counting: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN | + HDMI_8x60_PHY_REG12_FORCE_LOCK); +} + +static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy) +{ + struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); + struct hdmi *hdmi = phy_8x60->hdmi; + + /* Assert RESET PHY from controller */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + HDMI_PHY_CTRL_SW_RESET); + udelay(10); + /* De-assert RESET PHY from controller */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0); + /* Turn off Driver */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); + udelay(10); + /* Disable PLL */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0); + /* Power down PHY, but keep RX-sense: */ + hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); +} + +static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = { + .destroy = hdmi_phy_8x60_destroy, + .reset = hdmi_phy_8x60_reset, + .powerup = hdmi_phy_8x60_powerup, + .powerdown = hdmi_phy_8x60_powerdown, +}; + +struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi) +{ + struct hdmi_phy_8x60 *phy_8x60; + struct hdmi_phy *phy = NULL; + int ret; + + phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL); + if (!phy_8x60) { + ret = -ENOMEM; + goto fail; + } + + phy = &phy_8x60->base; + + phy->funcs = &hdmi_phy_8x60_funcs; + + phy_8x60->hdmi = hdmi; + + return phy; + +fail: + if (phy) + hdmi_phy_8x60_destroy(phy); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c new file mode 100644 index 000000000000..bda0fc40b207 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c @@ -0,0 +1,684 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "mdp4_kms.h" + +#include +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_flip_work.h" + +struct mdp4_crtc { + struct drm_crtc base; + char name[8]; + struct drm_plane *plane; + int id; + int ovlp; + enum mdp4_dma dma; + bool enabled; + + /* which mixer/encoder we route output to: */ + int mixer; + + struct { + spinlock_t lock; + bool stale; + uint32_t width, height; + + /* next cursor to scan-out: */ + uint32_t next_iova; + struct drm_gem_object *next_bo; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + } cursor; + + + /* if there is a pending flip, these will be non-null: */ + struct drm_pending_vblank_event *event; + struct work_struct pageflip_work; + + /* the fb that we currently hold a scanout ref to: */ + struct drm_framebuffer *fb; + + /* for unref'ing framebuffers after scanout completes: */ + struct drm_flip_work unref_fb_work; + + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + + struct mdp4_irq vblank; + struct mdp4_irq err; +}; +#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) + +static struct mdp4_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + return to_mdp4_kms(priv->kms); +} + +static void update_fb(struct drm_crtc *crtc, bool async, + struct drm_framebuffer *new_fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_framebuffer *old_fb = mdp4_crtc->fb; + + if (old_fb) + drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); + + /* grab reference to incoming scanout fb: */ + drm_framebuffer_reference(new_fb); + mdp4_crtc->base.fb = new_fb; + mdp4_crtc->fb = new_fb; + + if (!async) { + /* enable vblank to pick up the old_fb */ + mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); + } +} + +static void complete_flip(struct drm_crtc *crtc, bool canceled) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = mdp4_crtc->event; + if (event) { + mdp4_crtc->event = NULL; + if (canceled) + event->base.destroy(&event->base); + else + drm_send_vblank_event(dev, mdp4_crtc->id, event); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static void crtc_flush(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t flush = 0; + + flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane)); + flush |= ovlp2flush(mdp4_crtc->ovlp); + + DBG("%s: flush=%08x", mdp4_crtc->name, flush); + + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); +} + +static void pageflip_worker(struct work_struct *work) +{ + struct mdp4_crtc *mdp4_crtc = + container_of(work, struct mdp4_crtc, pageflip_work); + struct drm_crtc *crtc = &mdp4_crtc->base; + + mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb); + crtc_flush(crtc); + + /* enable vblank to complete flip: */ + mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); +} + +static void unref_fb_worker(struct drm_flip_work *work, void *val) +{ + struct mdp4_crtc *mdp4_crtc = + container_of(work, struct mdp4_crtc, unref_fb_work); + struct drm_device *dev = mdp4_crtc->base.dev; + + mutex_lock(&dev->mode_config.mutex); + drm_framebuffer_unreference(val); + mutex_unlock(&dev->mode_config.mutex); +} + +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp4_crtc *mdp4_crtc = + 
container_of(work, struct mdp4_crtc, unref_cursor_work); + struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); + + msm_gem_put_iova(val, mdp4_kms->id); + drm_gem_object_unreference_unlocked(val); +} + +static void mdp4_crtc_destroy(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane); + + drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work); + drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); + + kfree(mdp4_crtc); +} + +static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + bool enabled = (mode == DRM_MODE_DPMS_ON); + + DBG("%s: mode=%d", mdp4_crtc->name, mode); + + if (enabled != mdp4_crtc->enabled) { + if (enabled) { + mdp4_enable(mdp4_kms); + mdp4_irq_register(mdp4_kms, &mdp4_crtc->err); + } else { + mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err); + mdp4_disable(mdp4_kms); + } + mdp4_crtc->enabled = enabled; + } +} + +static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void blend_setup(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + int i, ovlp = mdp4_crtc->ovlp; + uint32_t mixer_cfg = 0; + + /* + * This probably would also need to be triggered by any attached + * plane when it changes.. for now since we are only using a single + * private plane, the configuration is hard-coded: + */ + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); + + for (i = 0; i < 4; i++) { + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), + MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | + MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST)); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); + } + + /* TODO single register for all CRTCs, so this won't work properly + * when multiple CRTCs are active.. 
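+	 * (the MDP4_LAYERMIXER_IN_CFG fields for every pipe live in this one register, so a proper fix
+	 * would likely cache each crtc's contribution and OR them together before writing)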
+ */ + switch (mdp4_plane_pipe(mdp4_crtc->plane)) { + case VG1: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); + break; + case VG2: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); + break; + case RGB1: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); + break; + case RGB2: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); + break; + case RGB3: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); + break; + case VG3: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); + break; + case VG4: + mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) | + COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); + break; + default: + WARN_ON("invalid pipe"); + break; + } + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); +} + +static int mdp4_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + enum mdp4_dma dma = mdp4_crtc->dma; + int ret, ovlp = mdp4_crtc->ovlp; + + mode = adjusted_mode; + + DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mdp4_crtc->name, mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), + MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | + MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); + + /* take data from pipe: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), + crtc->fb->pitches[0]); + mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), + MDP4_DMA_DST_SIZE_WIDTH(0) | + MDP4_DMA_DST_SIZE_HEIGHT(0)); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), + MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | + MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), + crtc->fb->pitches[0]); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); + + update_fb(crtc, false, crtc->fb); + + ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + x << 16, y << 16, + mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", + mdp4_crtc->name, ret); + return ret; + } + + if (dma == DMA_E) { + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); + } + + return 0; +} + +static void mdp4_crtc_prepare(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + DBG("%s", mdp4_crtc->name); + /* make sure we hold a ref to mdp clks while setting up mode: */ + mdp4_enable(get_kms(crtc)); + mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void mdp4_crtc_commit(struct drm_crtc *crtc) +{ + 
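+	/* counterpart to mdp4_crtc_prepare(): re-enable the crtc and flush the newly programmed mode out to hw */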
mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + crtc_flush(crtc); + /* drop the ref to mdp clk's that we got in prepare: */ + mdp4_disable(get_kms(crtc)); +} + +static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_plane *plane = mdp4_crtc->plane; + struct drm_display_mode *mode = &crtc->mode; + + update_fb(crtc, false, crtc->fb); + + return mdp4_plane_mode_set(plane, crtc, crtc->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + x << 16, y << 16, + mode->hdisplay << 16, mode->vdisplay << 16); +} + +static void mdp4_crtc_load_lut(struct drm_crtc *crtc) +{ +} + +static int mdp4_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *new_fb, + struct drm_pending_vblank_event *event) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_gem_object *obj; + + if (mdp4_crtc->event) { + dev_err(dev->dev, "already pending flip!\n"); + return -EBUSY; + } + + obj = msm_framebuffer_bo(new_fb, 0); + + mdp4_crtc->event = event; + update_fb(crtc, true, new_fb); + + return msm_gem_queue_inactive_work(obj, + &mdp4_crtc->pageflip_work); +} + +static int mdp4_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val) +{ + // XXX + return -EINVAL; +} + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +/* called from IRQ to update cursor related registers (if needed). The + * cursor registers, other than x/y position, appear not to be double + * buffered, and changing them other than from vblank seems to trigger + * underflow. + */ +static void update_cursor(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + enum mdp4_dma dma = mdp4_crtc->dma; + unsigned long flags; + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + if (mdp4_crtc->cursor.stale) { + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; + struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; + uint32_t iova = mdp4_crtc->cursor.next_iova; + + if (next_bo) { + /* take a obj ref + iova ref when we start scanning out: */ + drm_gem_object_reference(next_bo); + msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova); + + /* enable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), + MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) | + MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height)); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), + MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) | + MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); + } else { + /* disable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), + MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB)); + } + + /* and drop the iova ref + obj rev when done scanning out: */ + if (prev_bo) + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo); + + mdp4_crtc->cursor.scanout_bo = next_bo; + mdp4_crtc->cursor.stale = false; + } + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); +} + +static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_device *dev = crtc->dev; + struct drm_gem_object *cursor_bo, *old_bo; + unsigned long flags; + uint32_t 
iova; + int ret; + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + if (handle) { + cursor_bo = drm_gem_object_lookup(dev, file_priv, handle); + if (!cursor_bo) + return -ENOENT; + } else { + cursor_bo = NULL; + } + + if (cursor_bo) { + ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova); + if (ret) + goto fail; + } else { + iova = 0; + } + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + old_bo = mdp4_crtc->cursor.next_bo; + mdp4_crtc->cursor.next_bo = cursor_bo; + mdp4_crtc->cursor.next_iova = iova; + mdp4_crtc->cursor.width = width; + mdp4_crtc->cursor.height = height; + mdp4_crtc->cursor.stale = true; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + if (old_bo) { + /* drop our previous reference: */ + msm_gem_put_iova(old_bo, mdp4_kms->id); + drm_gem_object_unreference_unlocked(old_bo); + } + + return 0; + +fail: + drm_gem_object_unreference_unlocked(cursor_bo); + return ret; +} + +static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + enum mdp4_dma dma = mdp4_crtc->dma; + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), + MDP4_DMA_CURSOR_POS_X(x) | + MDP4_DMA_CURSOR_POS_Y(y)); + + return 0; +} + +static const struct drm_crtc_funcs mdp4_crtc_funcs = { + .set_config = drm_crtc_helper_set_config, + .destroy = mdp4_crtc_destroy, + .page_flip = mdp4_crtc_page_flip, + .set_property = mdp4_crtc_set_property, + .cursor_set = mdp4_crtc_cursor_set, + .cursor_move = mdp4_crtc_cursor_move, +}; + +static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { + .dpms = mdp4_crtc_dpms, + .mode_fixup = mdp4_crtc_mode_fixup, + .mode_set = mdp4_crtc_mode_set, + .prepare = mdp4_crtc_prepare, + .commit = mdp4_crtc_commit, + .mode_set_base = mdp4_crtc_mode_set_base, + .load_lut = mdp4_crtc_load_lut, +}; + +static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); + struct drm_crtc *crtc = &mdp4_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; + + update_cursor(crtc); + complete_flip(crtc, false); + mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); + + drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq); + drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); +} + +static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); + struct drm_crtc *crtc = &mdp4_crtc->base; + DBG("%s: error: %08x", mdp4_crtc->name, irqstatus); + crtc_flush(crtc); +} + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + return mdp4_crtc->vblank.irqmask; +} + +void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc) +{ + complete_flip(crtc, true); +} + +/* set dma config, ie. the format the encoder wants. 
*/ +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config); +} + +/* set interface for routing crtc->encoder: */ +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t intf_sel; + + intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL); + + switch (mdp4_crtc->dma) { + case DMA_P: + intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf); + break; + case DMA_S: + intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf); + break; + case DMA_E: + intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf); + break; + } + + if (intf == INTF_DSI_VIDEO) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO; + mdp4_crtc->mixer = 0; + } else if (intf == INTF_DSI_CMD) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD; + mdp4_crtc->mixer = 0; + } else if (intf == INTF_LCDC_DTV){ + mdp4_crtc->mixer = 1; + } + + blend_setup(crtc); + + DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel); + + mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); +} + +static const char *dma_names[] = { + "DMA_P", "DMA_S", "DMA_E", +}; + +/* initialize crtc */ +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id) +{ + struct drm_crtc *crtc = NULL; + struct mdp4_crtc *mdp4_crtc; + int ret; + + mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); + if (!mdp4_crtc) { + ret = -ENOMEM; + goto fail; + } + + crtc = &mdp4_crtc->base; + + mdp4_crtc->plane = plane; + mdp4_crtc->plane->crtc = crtc; + + mdp4_crtc->ovlp = ovlp_id; + mdp4_crtc->dma = dma_id; + + mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma); + mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq; + + mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma); + mdp4_crtc->err.irq = mdp4_crtc_err_irq; + + snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d", + dma_names[dma_id], ovlp_id); + + spin_lock_init(&mdp4_crtc->cursor.lock); + + ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16, + "unref fb", unref_fb_worker); + if (ret) + goto fail; + + ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64, + "unref cursor", unref_cursor_worker); + + INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker); + + drm_crtc_init(dev, crtc, &mdp4_crtc_funcs); + drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); + + mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base); + + return crtc; + +fail: + if (crtc) + mdp4_crtc_destroy(crtc); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c new file mode 100644 index 000000000000..06d49e309d34 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c @@ -0,0 +1,317 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#include "mdp4_kms.h" +#include "msm_connector.h" + +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + + +struct mdp4_dtv_encoder { + struct drm_encoder base; + struct clk *src_clk; + struct clk *hdmi_clk; + struct clk *mdp_clk; + unsigned long int pixclock; + bool enabled; + uint32_t bsc; +}; +#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(priv->kms); +} + +#ifdef CONFIG_MSM_BUS_SCALING +#include +/* not ironically named at all.. no, really.. */ +static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) +{ + struct drm_device *dev = mdp4_dtv_encoder->base.dev; + struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0"); + + if (!dtv_pdata) { + dev_err(dev->dev, "could not find dtv pdata\n"); + return; + } + + if (dtv_pdata->bus_scale_table) { + mdp4_dtv_encoder->bsc = msm_bus_scale_register_client( + dtv_pdata->bus_scale_table); + DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc); + DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save); + if (dtv_pdata->lcdc_power_save) + dtv_pdata->lcdc_power_save(1); + } +} + +static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) +{ + if (mdp4_dtv_encoder->bsc) { + msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc); + mdp4_dtv_encoder->bsc = 0; + } +} + +static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) +{ + if (mdp4_dtv_encoder->bsc) { + DBG("set bus scaling: %d", idx); + msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx); + } +} +#else +static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} +static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} +static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {} +#endif + +static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + bs_fini(mdp4_dtv_encoder); + drm_encoder_cleanup(encoder); + kfree(mdp4_dtv_encoder); +} + +static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { + .destroy = mdp4_dtv_encoder_destroy, +}; + +static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct msm_connector *msm_connector = get_connector(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + bool enabled = (mode == DRM_MODE_DPMS_ON); + + DBG("mode=%d", mode); + + if (enabled == mdp4_dtv_encoder->enabled) + return; + + if (enabled) { + unsigned long pc = mdp4_dtv_encoder->pixclock; + int ret; + + bs_set(mdp4_dtv_encoder, 1); + + if (msm_connector) + msm_connector->funcs->dpms(msm_connector, mode); + + DBG("setting src_clk=%lu", pc); + + ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); + if (ret) + dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); + clk_prepare_enable(mdp4_dtv_encoder->src_clk); + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); + if (ret) + dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); + } 
else { + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC); + + clk_disable_unprepare(mdp4_dtv_encoder->src_clk); + clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); + clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); + + if (msm_connector) + msm_connector->funcs->dpms(msm_connector, mode); + + bs_set(mdp4_dtv_encoder, 0); + } + + mdp4_dtv_encoder->enabled = enabled; +} + +static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct msm_connector *msm_connector = get_connector(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mdp4_dtv_encoder->pixclock = mode->clock * 1000; + + DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + dtv_hsync_skew = 0; /* get this from panel? 
*/ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL, + MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL, + MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end); + mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR, + MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL, + MDP4_DTV_ACTIVE_HCTL_START(0) | + MDP4_DTV_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); + + if (msm_connector) + msm_connector->funcs->mode_set(msm_connector, mode); +} + +static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder) +{ + mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder) +{ + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_R_BPC(BPC8) | + MDP4_DMA_CONFIG_G_BPC(BPC8) | + MDP4_DMA_CONFIG_B_BPC(BPC8) | + MDP4_DMA_CONFIG_PACK(0x21)); + mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV); + mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { + .dpms = mdp4_dtv_encoder_dpms, + .mode_fixup = mdp4_dtv_encoder_mode_fixup, + .mode_set = mdp4_dtv_encoder_mode_set, + .prepare = mdp4_dtv_encoder_prepare, + .commit = mdp4_dtv_encoder_commit, +}; + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + return clk_round_rate(mdp4_dtv_encoder->src_clk, rate); +} + +/* initialize encoder */ +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_dtv_encoder *mdp4_dtv_encoder; + int ret; + + mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); + if (!mdp4_dtv_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp4_dtv_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); + + mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk"); + if (IS_ERR(mdp4_dtv_encoder->src_clk)) { + dev_err(dev->dev, "failed to get src_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->src_clk); + goto fail; + } + + mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); + if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { + dev_err(dev->dev, "failed to get hdmi_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); + goto fail; + 
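/*
 * A sketch of the vertical timing math programmed by
 * mdp4_dtv_encoder_mode_set() above, pulled into a helper purely for
 * illustration.  The struct and function names are hypothetical; the
 * arithmetic mirrors the values written to the DTV registers, with every
 * vertical quantity expressed in pixels (lines multiplied by htotal).
 */
struct dtv_vtiming {
	uint32_t vsync_period;    /* one full frame, in pixels */
	uint32_t vsync_len;       /* vsync pulse width, in pixels */
	uint32_t display_v_start; /* first active pixel of the frame */
	uint32_t display_v_end;   /* last active pixel of the frame */
};

static void dtv_calc_vtiming(const struct drm_display_mode *m,
		uint32_t hsync_skew, struct dtv_vtiming *vt)
{
	vt->vsync_period = m->vtotal * m->htotal;
	vt->vsync_len = (m->vsync_end - m->vsync_start) * m->htotal;
	vt->display_v_start =
		(m->vtotal - m->vsync_start) * m->htotal + hsync_skew;
	vt->display_v_end = vt->vsync_period -
		((m->vsync_start - m->vdisplay) * m->htotal) + hsync_skew - 1;
}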
} + + mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk"); + if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { + dev_err(dev->dev, "failed to get mdp_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); + goto fail; + } + + bs_init(mdp4_dtv_encoder); + + return encoder; + +fail: + if (encoder) + mdp4_dtv_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c new file mode 100644 index 000000000000..7b645f2e837a --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + + +#include "msm_drv.h" +#include "mdp4_kms.h" + +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ + .base = { .pixel_format = DRM_FORMAT_ ## name }, \ + .bpc_a = BPC ## a ## A, \ + .bpc_r = BPC ## r, \ + .bpc_g = BPC ## g, \ + .bpc_b = BPC ## b, \ + .unpack = { e0, e1, e2, e3 }, \ + .alpha_enable = alpha, \ + .unpack_tight = tight, \ + .cpp = c, \ + .unpack_count = cnt, \ + } + +#define BPC0A 0 + +static const struct mdp4_format formats[] = { + /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ + FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), + FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), + FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3), + FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3), + FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3), + FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), +}; + +const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format) +{ + int i; + for (i = 0; i < ARRAY_SIZE(formats); i++) { + const struct mdp4_format *f = &formats[i]; + if (f->base.pixel_format == format) + return &f->base; + } + return NULL; +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c new file mode 100644 index 000000000000..5c6b7fca4edd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + + +#include "msm_drv.h" +#include "mdp4_kms.h" + + +struct mdp4_irq_wait { + struct mdp4_irq irq; + int count; +}; + +static DECLARE_WAIT_QUEUE_HEAD(wait_event); + +static DEFINE_SPINLOCK(list_lock); + +static void update_irq(struct mdp4_kms *mdp4_kms) +{ + struct mdp4_irq *irq; + uint32_t irqmask = mdp4_kms->vblank_mask; + + BUG_ON(!spin_is_locked(&list_lock)); + + list_for_each_entry(irq, &mdp4_kms->irq_list, node) + irqmask |= irq->irqmask; + + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask); +} + +static void update_irq_unlocked(struct mdp4_kms *mdp4_kms) +{ + unsigned long flags; + spin_lock_irqsave(&list_lock, flags); + update_irq(mdp4_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus) +{ + DRM_ERROR("errors: %08x\n", irqstatus); +} + +void mdp4_irq_preinstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); +} + +int mdp4_irq_postinstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + struct mdp4_irq *error_handler = &mdp4_kms->error_handler; + + INIT_LIST_HEAD(&mdp4_kms->irq_list); + + error_handler->irq = mdp4_irq_error_handler; + error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | + MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + + mdp4_irq_register(mdp4_kms, error_handler); + + return 0; +} + +void mdp4_irq_uninstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); +} + +irqreturn_t mdp4_irq(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp4_irq *handler, *n; + unsigned long flags; + unsigned int id; + uint32_t status; + + status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); + + VERB("status=%08x", status); + + for (id = 0; id < priv->num_crtcs; id++) + if (status & mdp4_crtc_vblank(priv->crtcs[id])) + drm_handle_vblank(dev, id); + + spin_lock_irqsave(&list_lock, flags); + mdp4_kms->in_irq = true; + list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) { + if (handler->irqmask & status) { + spin_unlock_irqrestore(&list_lock, flags); + handler->irq(handler, handler->irqmask & status); + spin_lock_irqsave(&list_lock, flags); + } + } + mdp4_kms->in_irq = false; + update_irq(mdp4_kms); + spin_unlock_irqrestore(&list_lock, flags); + + return IRQ_HANDLED; +} + +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc); + update_irq(mdp4_kms); + spin_unlock_irqrestore(&list_lock, flags); + + return 0; +} + +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc); + update_irq(mdp4_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus) +{ + struct mdp4_irq_wait *wait = + container_of(irq, struct mdp4_irq_wait, irq); + wait->count--; + wake_up_all(&wait_event); +} + +void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask) +{ + struct mdp4_irq_wait wait = { + .irq = { + .irq = wait_irq, + .irqmask = irqmask, + }, + 
.count = 1, + }; + mdp4_irq_register(mdp4_kms, &wait.irq); + wait_event(wait_event, (wait.count <= 0)); + mdp4_irq_unregister(mdp4_kms, &wait.irq); +} + +void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (!irq->registered) { + irq->registered = true; + list_add(&irq->node, &mdp4_kms->irq_list); + needs_update = !mdp4_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + update_irq_unlocked(mdp4_kms); +} + +void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (irq->registered) { + irq->registered = false; + list_del(&irq->node); + needs_update = !mdp4_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + update_irq_unlocked(mdp4_kms); +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c new file mode 100644 index 000000000000..960cd894da78 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c @@ -0,0 +1,368 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + + +#include "msm_drv.h" +#include "mdp4_kms.h" + +#include + +static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); + +static int mdp4_hw_init(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + struct drm_device *dev = mdp4_kms->dev; + uint32_t version, major, minor, dmap_cfg, vg_cfg; + unsigned long clk; + int ret = 0; + + pm_runtime_get_sync(dev->dev); + + version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); + + major = FIELD(version, MDP4_VERSION_MAJOR); + minor = FIELD(version, MDP4_VERSION_MINOR); + + DBG("found MDP version v%d.%d", major, minor); + + if (major != 4) { + dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto out; + } + + mdp4_kms->rev = minor; + + if (mdp4_kms->dsi_pll_vdda) { + if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) { + ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda, + 1200000, 1200000); + if (ret) { + dev_err(dev->dev, + "failed to set dsi_pll_vdda voltage: %d\n", ret); + goto out; + } + } + } + + if (mdp4_kms->dsi_pll_vddio) { + if (mdp4_kms->rev == 2) { + ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio, + 1800000, 1800000); + if (ret) { + dev_err(dev->dev, + "failed to set dsi_pll_vddio voltage: %d\n", ret); + goto out; + } + } + } + + if (mdp4_kms->rev > 1) { + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); + } + + mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3); + + /* max read pending cmd config, 3 pending requests: */ + mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222); + + clk = clk_get_rate(mdp4_kms->clk); + + if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) { + dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */ + vg_cfg = 0x47; /* 16 bytes-burs x 8 req */ + } else { + dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */ + vg_cfg = 0x43; /* 16 bytes-burst x 4 req */ + } + + DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg); + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg); + + if (mdp4_kms->rev >= 2) + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1); + + /* disable CSC matrix / YUV by default: */ + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0); + + if (mdp4_kms->rev > 1) + mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); + +out: + pm_runtime_put_sync(dev->dev); + + return ret; +} + +static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder) +{ + /* if we had >1 encoder, we'd need something more clever: */ + return mdp4_dtv_round_pixclk(encoder, rate); +} + +static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + struct msm_drm_private *priv = mdp4_kms->dev->dev_private; + unsigned i; + + for (i = 0; i < priv->num_crtcs; i++) + mdp4_crtc_cancel_pending_flip(priv->crtcs[i]); +} + +static void mdp4_destroy(struct msm_kms *kms) +{ + struct 
mdp4_kms *mdp4_kms = to_mdp4_kms(kms); + kfree(mdp4_kms); +} + +static const struct msm_kms_funcs kms_funcs = { + .hw_init = mdp4_hw_init, + .irq_preinstall = mdp4_irq_preinstall, + .irq_postinstall = mdp4_irq_postinstall, + .irq_uninstall = mdp4_irq_uninstall, + .irq = mdp4_irq, + .enable_vblank = mdp4_enable_vblank, + .disable_vblank = mdp4_disable_vblank, + .get_format = mdp4_get_format, + .round_pixclk = mdp4_round_pixclk, + .preclose = mdp4_preclose, + .destroy = mdp4_destroy, +}; + +int mdp4_disable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_disable_unprepare(mdp4_kms->clk); + if (mdp4_kms->pclk) + clk_disable_unprepare(mdp4_kms->pclk); + clk_disable_unprepare(mdp4_kms->lut_clk); + + return 0; +} + +int mdp4_enable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_prepare_enable(mdp4_kms->clk); + if (mdp4_kms->pclk) + clk_prepare_enable(mdp4_kms->pclk); + clk_prepare_enable(mdp4_kms->lut_clk); + + return 0; +} + +static int modeset_init(struct mdp4_kms *mdp4_kms) +{ + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + /* + * NOTE: this is a bit simplistic until we add support + * for more than just RGB1->DMA_E->DTV->HDMI + */ + + /* the CRTCs get constructed with a private plane: */ + plane = mdp4_plane_init(dev, RGB1, true); + if (IS_ERR(plane)) { + dev_err(dev->dev, "failed to construct plane for RGB1\n"); + ret = PTR_ERR(plane); + goto fail; + } + + crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E); + if (IS_ERR(crtc)) { + dev_err(dev->dev, "failed to construct crtc for DMA_E\n"); + ret = PTR_ERR(crtc); + goto fail; + } + priv->crtcs[priv->num_crtcs++] = crtc; + + encoder = mdp4_dtv_encoder_init(dev); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct DTV encoder\n"); + ret = PTR_ERR(encoder); + goto fail; + } + encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ + priv->encoders[priv->num_encoders++] = encoder; + + connector = hdmi_connector_init(dev, encoder); + if (IS_ERR(connector)) { + dev_err(dev->dev, "failed to construct HDMI connector\n"); + ret = PTR_ERR(connector); + goto fail; + } + priv->connectors[priv->num_connectors++] = connector; + + return 0; + +fail: + return ret; +} + +static const char *iommu_ports[] = { + "mdp_port0_cb0", "mdp_port1_cb0", +}; + +struct msm_kms *mdp4_kms_init(struct drm_device *dev) +{ + struct platform_device *pdev = dev->platformdev; + struct mdp4_platform_config *config = mdp4_get_config(pdev); + struct mdp4_kms *mdp4_kms; + struct msm_kms *kms = NULL; + int ret; + + mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); + if (!mdp4_kms) { + dev_err(dev->dev, "failed to allocate kms\n"); + ret = -ENOMEM; + goto fail; + } + + kms = &mdp4_kms->base; + kms->funcs = &kms_funcs; + + mdp4_kms->dev = dev; + + mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4"); + if (IS_ERR(mdp4_kms->mmio)) { + ret = PTR_ERR(mdp4_kms->mmio); + goto fail; + } + + mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda"); + if (IS_ERR(mdp4_kms->dsi_pll_vdda)) + mdp4_kms->dsi_pll_vdda = NULL; + + mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio"); + if (IS_ERR(mdp4_kms->dsi_pll_vddio)) + mdp4_kms->dsi_pll_vddio = NULL; + + mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(mdp4_kms->vdd)) + mdp4_kms->vdd = NULL; + + if (mdp4_kms->vdd) { + ret = regulator_enable(mdp4_kms->vdd); + if (ret) { + 
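/*
 * A short sketch of what the possible_crtcs mask set in modeset_init()
 * above encodes: bit N means "this encoder may be fed by the CRTC that was
 * registered at index N", so 0x1 pins the DTV encoder to the single DMA_E
 * CRTC created here.  crtc_mask_for() is hypothetical and only illustrates
 * how the mask relates to priv->crtcs[].
 */
static uint32_t crtc_mask_for(struct msm_drm_private *priv,
		struct drm_crtc *crtc)
{
	unsigned int i;

	for (i = 0; i < priv->num_crtcs; i++)
		if (priv->crtcs[i] == crtc)
			return 1 << i;	/* 0x1 for the first (and only) CRTC */

	return 0;
}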
dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); + goto fail; + } + } + + mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(mdp4_kms->clk)) { + dev_err(dev->dev, "failed to get core_clk\n"); + ret = PTR_ERR(mdp4_kms->clk); + goto fail; + } + + mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk"); + if (IS_ERR(mdp4_kms->pclk)) + mdp4_kms->pclk = NULL; + + // XXX if (rev >= MDP_REV_42) { ??? + mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); + if (IS_ERR(mdp4_kms->lut_clk)) { + dev_err(dev->dev, "failed to get lut_clk\n"); + ret = PTR_ERR(mdp4_kms->lut_clk); + goto fail; + } + + clk_set_rate(mdp4_kms->clk, config->max_clk); + clk_set_rate(mdp4_kms->lut_clk, config->max_clk); + + if (!config->iommu) { + dev_err(dev->dev, "no iommu\n"); + ret = -ENXIO; + goto fail; + } + + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); + mdelay(16); + + ret = msm_iommu_attach(dev, config->iommu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + if (ret) + goto fail; + + mdp4_kms->id = msm_register_iommu(dev, config->iommu); + if (mdp4_kms->id < 0) { + ret = mdp4_kms->id; + dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); + goto fail; + } + + ret = modeset_init(mdp4_kms); + if (ret) { + dev_err(dev->dev, "modeset_init failed: %d\n", ret); + goto fail; + } + + return kms; + +fail: + if (kms) + mdp4_destroy(kms); + return ERR_PTR(ret); +} + +static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) +{ + static struct mdp4_platform_config config = {}; +#ifdef CONFIG_OF + /* TODO */ +#else + if (cpu_is_apq8064()) + config.max_clk = 266667000; + else + config.max_clk = 200000000; + + config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN); +#endif + return &config; +} diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h new file mode 100644 index 000000000000..1e83554955f3 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP4_KMS_H__ +#define __MDP4_KMS_H__ + +#include +#include +#include + +#include "msm_drv.h" +#include "mdp4.xml.h" + + +/* For transiently registering for different MDP4 irqs that various parts + * of the KMS code need during setup/configuration. We these are not + * necessarily the same as what drm_vblank_get/put() are requesting, and + * the hysteresis in drm_vblank_put() is not necessarily desirable for + * internal housekeeping related irq usage. 
+ */ +struct mdp4_irq { + struct list_head node; + uint32_t irqmask; + bool registered; + void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus); +}; + +struct mdp4_kms { + struct msm_kms base; + + struct drm_device *dev; + + int rev; + + /* mapper-id used to request GEM buffer mapped for scanout: */ + int id; + + void __iomem *mmio; + + struct regulator *dsi_pll_vdda; + struct regulator *dsi_pll_vddio; + struct regulator *vdd; + + struct clk *clk; + struct clk *pclk; + struct clk *lut_clk; + + /* irq handling: */ + bool in_irq; + struct list_head irq_list; /* list of mdp4_irq */ + uint32_t vblank_mask; /* irq bits set for userspace vblank */ + struct mdp4_irq error_handler; +}; +#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) + +/* platform config data (ie. from DT, or pdata) */ +struct mdp4_platform_config { + struct iommu_domain *iommu; + uint32_t max_clk; +}; + +struct mdp4_format { + struct msm_format base; + enum mpd4_bpc bpc_r, bpc_g, bpc_b; + enum mpd4_bpc_alpha bpc_a; + uint8_t unpack[4]; + bool alpha_enable, unpack_tight; + uint8_t cpp, unpack_count; +}; +#define to_mdp4_format(x) container_of(x, struct mdp4_format, base) + +static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) +{ + msm_writel(data, mdp4_kms->mmio + reg); +} + +static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg) +{ + return msm_readl(mdp4_kms->mmio + reg); +} + +static inline uint32_t pipe2flush(enum mpd4_pipe pipe) +{ + switch (pipe) { + case VG1: return MDP4_OVERLAY_FLUSH_VG1; + case VG2: return MDP4_OVERLAY_FLUSH_VG2; + case RGB1: return MDP4_OVERLAY_FLUSH_RGB1; + case RGB2: return MDP4_OVERLAY_FLUSH_RGB1; + default: return 0; + } +} + +static inline uint32_t ovlp2flush(int ovlp) +{ + switch (ovlp) { + case 0: return MDP4_OVERLAY_FLUSH_OVLP0; + case 1: return MDP4_OVERLAY_FLUSH_OVLP1; + default: return 0; + } +} + +static inline uint32_t dma2irq(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_DMA_P_DONE; + case DMA_S: return MDP4_IRQ_DMA_S_DONE; + case DMA_E: return MDP4_IRQ_DMA_E_DONE; + default: return 0; + } +} + +static inline uint32_t dma2err(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN; + case DMA_S: return 0; // ??? 
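/*
 * Usage sketch, not part of the patch: dma2irq()/dma2err() map a DMA
 * channel to its interrupt bits, which pairs naturally with mdp4_irq_wait()
 * (declared below, implemented in mdp4_irq.c above), e.g. to block until a
 * given channel has finished its current frame, the same way the DTV
 * encoder waits on MDP4_IRQ_EXTERNAL_VSYNC in its dpms hook.
 */
static inline void wait_for_dma_done(struct mdp4_kms *mdp4_kms,
		enum mdp4_dma dma)
{
	uint32_t irqmask = dma2irq(dma);	/* e.g. MDP4_IRQ_DMA_E_DONE */

	if (irqmask)
		mdp4_irq_wait(mdp4_kms, irqmask);
}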
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + default: return 0; + } +} + +int mdp4_disable(struct mdp4_kms *mdp4_kms); +int mdp4_enable(struct mdp4_kms *mdp4_kms); + +void mdp4_irq_preinstall(struct msm_kms *kms); +int mdp4_irq_postinstall(struct msm_kms *kms); +void mdp4_irq_uninstall(struct msm_kms *kms); +irqreturn_t mdp4_irq(struct msm_kms *kms); +void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask); +void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); +void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); + +const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); + +void mdp4_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj); +void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb); +int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane); +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mpd4_pipe pipe_id, bool private_plane); + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); +void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc); +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf); +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id); + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); + +#ifdef CONFIG_MSM_BUS_SCALING +static inline int match_dev_name(struct device *dev, void *data) +{ + return !strcmp(dev_name(dev), data); +} +/* bus scaling data is associated with extra pointless platform devices, + * "dtv", etc.. this is a bit of a hack, but we need a way for encoders + * to find their pdata to make the bus-scaling stuff work. + */ +static inline void *mdp4_find_pdata(const char *devname) +{ + struct device *dev; + dev = bus_find_device(&platform_bus_type, NULL, + (void *)devname, match_dev_name); + return dev ? dev->platform_data : NULL; +} +#endif + +#endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c new file mode 100644 index 000000000000..3468229d58b3 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "mdp4_kms.h" + + +struct mdp4_plane { + struct drm_plane base; + const char *name; + + enum mpd4_pipe pipe; + + uint32_t nformats; + uint32_t formats[32]; + + bool enabled; +}; +#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) + +static struct mdp4_kms *get_kms(struct drm_plane *plane) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + return to_mdp4_kms(priv->kms); +} + +static int mdp4_plane_update(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + + mdp4_plane->enabled = true; + + if (plane->fb) + drm_framebuffer_unreference(plane->fb); + + drm_framebuffer_reference(fb); + + return mdp4_plane_mode_set(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); +} + +static int mdp4_plane_disable(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + DBG("%s: TODO", mdp4_plane->name); // XXX + return 0; +} + +static void mdp4_plane_destroy(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + + mdp4_plane_disable(plane); + drm_plane_cleanup(plane); + + kfree(mdp4_plane); +} + +/* helper to install properties which are common to planes and crtcs */ +void mdp4_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj) +{ + // XXX +} + +int mdp4_plane_set_property(struct drm_plane *plane, + struct drm_property *property, uint64_t val) +{ + // XXX + return -EINVAL; +} + +static const struct drm_plane_funcs mdp4_plane_funcs = { + .update_plane = mdp4_plane_update, + .disable_plane = mdp4_plane_disable, + .destroy = mdp4_plane_destroy, + .set_property = mdp4_plane_set_property, +}; + +void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + enum mpd4_pipe pipe = mdp4_plane->pipe; + uint32_t iova; + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), + MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | + MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe), + MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | + MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); + + msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova); + + plane->fb = fb; +} + +#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 + +int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + enum mpd4_pipe pipe = mdp4_plane->pipe; + const struct mdp4_format *format; + uint32_t op_mode = 0; + uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; + uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; + + /* src values are in Q16 fixed point, convert to integer: */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + if (src_w != crtc_w) { + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; + /* TODO calc phasex_step */ + } + + if (src_h != crtc_h) { + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; + /* TODO calc phasey_step */ + 
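/*
 * The two TODOs above leave the scaler at MDP4_VG_PHASE_STEP_DEFAULT
 * (0x20000000 == 1 << 29, which reads like 1.0 in a 29-bit fractional
 * fixed-point format).  Purely as an illustration -- the real MDP4 phase
 * step formula is not part of this patch -- a src/dst ratio in that same
 * fixed-point format might be computed like this (div_u64() is from
 * <linux/math64.h>):
 */
static uint32_t phase_step_sketch(uint32_t src, uint32_t dst)
{
	if (!dst)
		return 1 << 29;		/* fall back to a unity step */

	/* e.g. a 2:1 downscale gives 2.0 in .29 fixed point */
	return (uint32_t)div_u64((uint64_t)src << 29, dst);
}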
} + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), + MDP4_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP4_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe), + MDP4_PIPE_SRC_XY_X(src_x) | + MDP4_PIPE_SRC_XY_Y(src_y)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe), + MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) | + MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), + MDP4_PIPE_SRC_XY_X(crtc_x) | + MDP4_PIPE_SRC_XY_Y(crtc_y)); + + mdp4_plane_set_scanout(plane, fb); + + format = to_mdp4_format(msm_framebuffer_format(fb)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), + MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), + MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); + + plane->crtc = crtc; + + return 0; +} + +static const char *pipe_names[] = { + "VG1", "VG2", + "RGB1", "RGB2", "RGB3", + "VG3", "VG4", +}; + +enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + return mdp4_plane->pipe; +} + +/* initialize plane */ +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mpd4_pipe pipe_id, bool private_plane) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_plane *plane = NULL; + struct mdp4_plane *mdp4_plane; + int ret; + + mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); + if (!mdp4_plane) { + ret = -ENOMEM; + goto fail; + } + + plane = &mdp4_plane->base; + + mdp4_plane->pipe = pipe_id; + mdp4_plane->name = pipe_names[pipe_id]; + + drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs, + mdp4_plane->formats, mdp4_plane->nformats, private_plane); + + mdp4_plane_install_properties(plane, &plane->base); + + return plane; + +fail: + if (plane) + mdp4_plane_destroy(plane); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/msm_connector.c b/drivers/gpu/drm/msm/msm_connector.c new file mode 100644 index 000000000000..aeea8879e36f --- /dev/null +++ b/drivers/gpu/drm/msm/msm_connector.c @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "msm_drv.h" +#include "msm_connector.h" + +void msm_connector_init(struct msm_connector *connector, + const struct msm_connector_funcs *funcs, + struct drm_encoder *encoder) +{ + connector->funcs = funcs; + connector->encoder = encoder; +} + +struct drm_encoder *msm_connector_attached_encoder( + struct drm_connector *connector) +{ + struct msm_connector *msm_connector = to_msm_connector(connector); + return msm_connector->encoder; +} diff --git a/drivers/gpu/drm/msm/msm_connector.h b/drivers/gpu/drm/msm/msm_connector.h new file mode 100644 index 000000000000..0b41866adc08 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_connector.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MSM_CONNECTOR_H__ +#define __MSM_CONNECTOR_H__ + +#include "msm_drv.h" + +/* + * Base class for MSM connectors. Typically a connector is a bit more + * passive. But with the split between (for example) DTV within MDP4, + * and HDMI encoder, we really need two parts to an encoder. Instead + * what we do is have the part external to the display controller block + * in the connector, which is called from the encoder to delegate the + * appropriate parts of modeset. + */ + +struct msm_connector; + +struct msm_connector_funcs { + void (*dpms)(struct msm_connector *connector, int mode); + void (*mode_set)(struct msm_connector *connector, + struct drm_display_mode *mode); +}; + +struct msm_connector { + struct drm_connector base; + struct drm_encoder *encoder; + const struct msm_connector_funcs *funcs; +}; +#define to_msm_connector(x) container_of(x, struct msm_connector, base) + +void msm_connector_init(struct msm_connector *connector, + const struct msm_connector_funcs *funcs, + struct drm_encoder *encoder); + +struct drm_encoder *msm_connector_attached_encoder( + struct drm_connector *connector); + +static inline struct msm_connector *get_connector(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + int i; + + for (i = 0; i < priv->num_connectors; i++) { + struct drm_connector *connector = priv->connectors[i]; + if (msm_connector_attached_encoder(connector) == encoder) + return to_msm_connector(connector); + } + + return NULL; +} + +#endif /* __MSM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c new file mode 100644 index 000000000000..b5ae0dbe1eb8 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -0,0 +1,532 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "msm_drv.h" + +#include + +static void msm_fb_output_poll_changed(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + if (priv->fbdev) + drm_fb_helper_hotplug_event(priv->fbdev); +} + +static const struct drm_mode_config_funcs mode_config_funcs = { + .fb_create = msm_framebuffer_create, + .output_poll_changed = msm_fb_output_poll_changed, +}; + +static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, + unsigned long iova, int flags, void *arg) +{ + DBG("*** fault: iova=%08lx, flags=%d", iova, flags); + return 0; +} + +int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu) +{ + struct msm_drm_private *priv = dev->dev_private; + int idx = priv->num_iommus++; + + if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus))) + return -EINVAL; + + priv->iommus[idx] = iommu; + + iommu_set_fault_handler(iommu, msm_fault_handler, dev); + + /* need to iommu_attach_device() somewhere?? on resume?? */ + + return idx; +} + +int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, + const char **names, int cnt) +{ + int i, ret; + + for (i = 0; i < cnt; i++) { + struct device *ctx = msm_iommu_get_ctx(names[i]); + if (!ctx) + continue; + ret = iommu_attach_device(iommu, ctx); + if (ret) { + dev_warn(dev->dev, "could not attach iommu to %s", names[i]); + return ret; + } + } + return 0; +} + +#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING +static bool reglog = false; +MODULE_PARM_DESC(reglog, "Enable register read/write logging"); +module_param(reglog, bool, 0600); +#else +#define reglog 0 +#endif + +void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname) +{ + struct resource *res; + unsigned long size; + void __iomem *ptr; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "failed to get memory resource: %s\n", name); + return ERR_PTR(-EINVAL); + } + + size = resource_size(res); + + ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); + if (!ptr) { + dev_err(&pdev->dev, "failed to ioremap: %s\n", name); + return ERR_PTR(-ENOMEM); + } + + if (reglog) + printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size); + + return ptr; +} + +void msm_writel(u32 data, void __iomem *addr) +{ + if (reglog) + printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data); + writel(data, addr); +} + +u32 msm_readl(const void __iomem *addr) +{ + u32 val = readl(addr); + if (reglog) + printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val); + return val; +} + +/* + * DRM operations: + */ + +static int msm_unload(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); + drm_vblank_cleanup(dev); + + pm_runtime_get_sync(dev->dev); + drm_irq_uninstall(dev); + pm_runtime_put_sync(dev->dev); + + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + + if (kms) { + pm_runtime_disable(dev->dev); + kms->funcs->destroy(kms); + } + + + dev->dev_private = NULL; + + kfree(priv); + + return 0; +} + +static int msm_load(struct drm_device *dev, unsigned long flags) +{ + struct platform_device *pdev = dev->platformdev; + struct msm_drm_private *priv; + struct msm_kms *kms; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + 
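/*
 * Usage sketch for the msm_ioremap()/msm_readl() helpers defined above,
 * mirroring what mdp4_kms_init() and mdp4_hw_init() do with the "MDP4"
 * region and REG_MDP4_VERSION; probe_mdp_version() itself is hypothetical.
 */
static int probe_mdp_version(struct platform_device *pdev)
{
	void __iomem *mmio;
	uint32_t version;

	/* NULL name means "use the first memory resource", as mdp4_kms_init() does */
	mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	version = msm_readl(mmio + REG_MDP4_VERSION);
	return FIELD(version, MDP4_VERSION_MAJOR);
}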
dev_err(dev->dev, "failed to allocate private data\n"); + return -ENOMEM; + } + + dev->dev_private = priv; + + priv->wq = alloc_ordered_workqueue("msm", 0); + + INIT_LIST_HEAD(&priv->inactive_list); + + drm_mode_config_init(dev); + + kms = mdp4_kms_init(dev); + if (IS_ERR(kms)) { + /* + * NOTE: once we have GPU support, having no kms should not + * be considered fatal.. ideally we would still support gpu + * and (for example) use dmabuf/prime to share buffers with + * imx drm driver on iMX5 + */ + dev_err(dev->dev, "failed to load kms\n"); + ret = PTR_ERR(priv->kms); + goto fail; + } + + priv->kms = kms; + + if (kms) { + pm_runtime_enable(dev->dev); + ret = kms->funcs->hw_init(kms); + if (ret) { + dev_err(dev->dev, "kms hw init failed: %d\n", ret); + goto fail; + } + } + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + dev->mode_config.funcs = &mode_config_funcs; + + ret = drm_vblank_init(dev, 1); + if (ret < 0) { + dev_err(dev->dev, "failed to initialize vblank\n"); + goto fail; + } + + pm_runtime_get_sync(dev->dev); + ret = drm_irq_install(dev); + pm_runtime_put_sync(dev->dev); + if (ret < 0) { + dev_err(dev->dev, "failed to install IRQ handler\n"); + goto fail; + } + + platform_set_drvdata(pdev, dev); + +#ifdef CONFIG_DRM_MSM_FBDEV + priv->fbdev = msm_fbdev_init(dev); +#endif + + drm_kms_helper_poll_init(dev); + + return 0; + +fail: + msm_unload(dev); + return ret; +} + +static void msm_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + if (kms) + kms->funcs->preclose(kms, file); +} + +static void msm_lastclose(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + if (priv->fbdev) { + drm_modeset_lock_all(dev); + drm_fb_helper_restore_fbdev_mode(priv->fbdev); + drm_modeset_unlock_all(dev); + } +} + +static irqreturn_t msm_irq(DRM_IRQ_ARGS) +{ + struct drm_device *dev = arg; + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + BUG_ON(!kms); + return kms->funcs->irq(kms); +} + +static void msm_irq_preinstall(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + BUG_ON(!kms); + kms->funcs->irq_preinstall(kms); +} + +static int msm_irq_postinstall(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + BUG_ON(!kms); + return kms->funcs->irq_postinstall(kms); +} + +static void msm_irq_uninstall(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + BUG_ON(!kms); + kms->funcs->irq_uninstall(kms); +} + +static int msm_enable_vblank(struct drm_device *dev, int crtc_id) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + if (!kms) + return -ENXIO; + DBG("dev=%p, crtc=%d", dev, crtc_id); + return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]); +} + +static void msm_disable_vblank(struct drm_device *dev, int crtc_id) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + if (!kms) + return; + DBG("dev=%p, crtc=%d", dev, crtc_id); + kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]); +} + +/* + * DRM debugfs: + */ + +#ifdef CONFIG_DEBUG_FS +static int msm_gem_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + + seq_printf(m, "All 
Objects:\n"); + msm_gem_describe_objects(&priv->inactive_list, m); + + return 0; +} + +static int msm_mm_show(struct drm_device *dev, struct seq_file *m) +{ + return drm_mm_dump_table(m, dev->mm_private); +} + +static int msm_fb_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_framebuffer *fb, *fbdev_fb = NULL; + + if (priv->fbdev) { + seq_printf(m, "fbcon "); + fbdev_fb = priv->fbdev->fb; + msm_framebuffer_describe(fbdev_fb, m); + } + + mutex_lock(&dev->mode_config.fb_lock); + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + if (fb == fbdev_fb) + continue; + + seq_printf(m, "user "); + msm_framebuffer_describe(fb, m); + } + mutex_unlock(&dev->mode_config.fb_lock); + + return 0; +} + +static int show_locked(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int (*show)(struct drm_device *dev, struct seq_file *m) = + node->info_ent->data; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + ret = show(dev, m); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static struct drm_info_list msm_debugfs_list[] = { + {"gem", show_locked, 0, msm_gem_show}, + { "mm", show_locked, 0, msm_mm_show }, + { "fb", show_locked, 0, msm_fb_show }, +}; + +static int msm_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install msm_debugfs_list\n"); + return ret; + } + + return ret; +} + +static void msm_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), minor); +} +#endif + +static const struct vm_operations_struct vm_ops = { + .fault = msm_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = msm_gem_mmap, +}; + +static struct drm_driver msm_driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, + .load = msm_load, + .unload = msm_unload, + .preclose = msm_preclose, + .lastclose = msm_lastclose, + .irq_handler = msm_irq, + .irq_preinstall = msm_irq_preinstall, + .irq_postinstall = msm_irq_postinstall, + .irq_uninstall = msm_irq_uninstall, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = msm_enable_vblank, + .disable_vblank = msm_disable_vblank, + .gem_free_object = msm_gem_free_object, + .gem_vm_ops = &vm_ops, + .dumb_create = msm_gem_dumb_create, + .dumb_map_offset = msm_gem_dumb_map_offset, + .dumb_destroy = msm_gem_dumb_destroy, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = msm_debugfs_init, + .debugfs_cleanup = msm_debugfs_cleanup, +#endif + .fops = &fops, + .name = "msm", + .desc = "MSM Snapdragon DRM", + .date = "20130625", + .major = 1, + .minor = 0, +}; + +#ifdef CONFIG_PM_SLEEP +static int msm_pm_suspend(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + + drm_kms_helper_poll_disable(ddev); + + return 0; +} + +static int msm_pm_resume(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + + drm_kms_helper_poll_enable(ddev); + + 
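/*
 * Sketch of how a further entry would hook into the show_locked() wrapper
 * in the debugfs code above: supply a show function taking (dev, m), list
 * it in msm_debugfs_list, and struct_mutex is taken around it for you.
 * The "kms" entry and msm_kms_show() are hypothetical.
 */
static int msm_kms_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;

	seq_printf(m, "kms: %p\n", priv->kms);
	return 0;
}

/* would sit alongside the existing entries:
 *	{ "kms", show_locked, 0, msm_kms_show },
 */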
return 0; +} +#endif + +static const struct dev_pm_ops msm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume) +}; + +/* + * Platform driver: + */ + +static int msm_pdev_probe(struct platform_device *pdev) +{ + return drm_platform_init(&msm_driver, pdev); +} + +static int msm_pdev_remove(struct platform_device *pdev) +{ + drm_platform_exit(&msm_driver, pdev); + + return 0; +} + +static const struct platform_device_id msm_id[] = { + { "mdp", 0 }, + { } +}; + +static struct platform_driver msm_platform_driver = { + .probe = msm_pdev_probe, + .remove = msm_pdev_remove, + .driver = { + .owner = THIS_MODULE, + .name = "msm", + .pm = &msm_pm_ops, + }, + .id_table = msm_id, +}; + +static int __init msm_drm_register(void) +{ + DBG("init"); + hdmi_register(); + return platform_driver_register(&msm_platform_driver); +} + +static void __exit msm_drm_unregister(void) +{ + DBG("fini"); + platform_driver_unregister(&msm_platform_driver); + hdmi_unregister(); +} + +module_init(msm_drm_register); +module_exit(msm_drm_unregister); + +MODULE_AUTHOR("Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MSM_DRV_H__ +#define __MSM_DRV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_OF +#include +#include +#include +#endif + +#include +#include +#include + +struct msm_kms; + +#define NUM_DOMAINS 1 /* one for KMS, then one per gpu core (?) */ + +struct msm_drm_private { + + struct msm_kms *kms; + + struct drm_fb_helper *fbdev; + + /* list of GEM objects: */ + struct list_head inactive_list; + + struct workqueue_struct *wq; + + /* registered IOMMU domains: */ + unsigned int num_iommus; + struct iommu_domain *iommus[NUM_DOMAINS]; + + unsigned int num_crtcs; + struct drm_crtc *crtcs[8]; + + unsigned int num_encoders; + struct drm_encoder *encoders[8]; + + unsigned int num_connectors; + struct drm_connector *connectors[8]; +}; + +struct msm_format { + uint32_t pixel_format; +}; + +/* As there are different display controller blocks depending on the + * snapdragon version, the kms support is split out and the appropriate + * implementation is loaded at runtime. The kms module is responsible + * for constructing the appropriate planes/crtcs/encoders/connectors. 
+ */ +struct msm_kms_funcs { + /* hw initialization: */ + int (*hw_init)(struct msm_kms *kms); + /* irq handling: */ + void (*irq_preinstall)(struct msm_kms *kms); + int (*irq_postinstall)(struct msm_kms *kms); + void (*irq_uninstall)(struct msm_kms *kms); + irqreturn_t (*irq)(struct msm_kms *kms); + int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + /* misc: */ + const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); + long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder); + /* cleanup: */ + void (*preclose)(struct msm_kms *kms, struct drm_file *file); + void (*destroy)(struct msm_kms *kms); +}; + +struct msm_kms { + const struct msm_kms_funcs *funcs; +}; + +struct msm_kms *mdp4_kms_init(struct drm_device *dev); + +int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu); +int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, + const char **names, int cnt); + +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); +int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, + uint32_t *iova); +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); +void msm_gem_put_iova(struct drm_gem_object *obj, int id); +int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, + uint32_t handle); +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); +void *msm_gem_vaddr_locked(struct drm_gem_object *obj); +void *msm_gem_vaddr(struct drm_gem_object *obj); +int msm_gem_queue_inactive_work(struct drm_gem_object *obj, + struct work_struct *work); +void msm_gem_free_object(struct drm_gem_object *obj); +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, + uint32_t size, uint32_t flags, uint32_t *handle); +struct drm_gem_object *msm_gem_new(struct drm_device *dev, + uint32_t size, uint32_t flags); + +struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); +const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); +struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); +struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, + struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd); + +struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); + +struct drm_connector *hdmi_connector_init(struct drm_device *dev, + struct drm_encoder *encoder); +void __init hdmi_register(void); +void __exit hdmi_unregister(void); + +#ifdef CONFIG_DEBUG_FS +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); +void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); +#endif + +void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname); +void msm_writel(u32 data, void __iomem *addr); +u32 msm_readl(const void __iomem *addr); + +#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) +#define VERB(fmt, ...) 
if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) + +static inline int align_pitch(int width, int bpp) +{ + int bytespp = (bpp + 7) / 8; + /* adreno needs pitch aligned to 32 pixels: */ + return bytespp * ALIGN(width, 32); +} + +/* for the generated headers: */ +#define INVALID_IDX(idx) ({BUG(); 0;}) + +#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT) + +/* for conditionally setting boolean flag(s): */ +#define COND(bool, val) ((bool) ? (val) : 0) + +/* just put these here until we start adding driver private ioctls: */ +// TODO might shuffle these around.. just need something for now.. +#define MSM_BO_CACHE_MASK 0x0000000f +#define MSM_BO_SCANOUT 0x00010000 /* scanout capable */ + +#define MSM_BO_CACHED 0x00000001 /* default */ +#define MSM_BO_WC 0x0000002 +#define MSM_BO_UNCACHED 0x00000004 + + +#endif /* __MSM_DRV_H__ */ diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c new file mode 100644 index 000000000000..0286c0eeb10c --- /dev/null +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "msm_drv.h" + +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +struct msm_framebuffer { + struct drm_framebuffer base; + const struct msm_format *format; + struct drm_gem_object *planes[2]; +}; +#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) + + +static int msm_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + return drm_gem_handle_create(file_priv, + msm_fb->planes[0], handle); +} + +static void msm_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + int i, n = drm_format_num_planes(fb->pixel_format); + + DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); + + drm_framebuffer_cleanup(fb); + + for (i = 0; i < n; i++) { + struct drm_gem_object *bo = msm_fb->planes[i]; + if (bo) + drm_gem_object_unreference_unlocked(bo); + } + + kfree(msm_fb); +} + +static int msm_framebuffer_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned flags, unsigned color, + struct drm_clip_rect *clips, unsigned num_clips) +{ + return 0; +} + +static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { + .create_handle = msm_framebuffer_create_handle, + .destroy = msm_framebuffer_destroy, + .dirty = msm_framebuffer_dirty, +}; + +#ifdef CONFIG_DEBUG_FS +void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) +{ + struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + int i, n = drm_format_num_planes(fb->pixel_format); + + seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", + fb->width, fb->height, (char *)&fb->pixel_format, + fb->refcount.refcount.counter, fb->base.id); + + for (i = 0; i < n; i++) { + seq_printf(m, " %d: offset=%d pitch=%d, obj: ", + i, fb->offsets[i], fb->pitches[i]); + 
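+		/* msm_gem_describe() finishes the line with the plane's GEM
+		 * object details (flags, name, refcount, offset, vaddr, size):
+		 */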
msm_gem_describe(msm_fb->planes[i], m); + } +} +#endif + +struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) +{ + struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + return msm_fb->planes[plane]; +} + +const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb) +{ + struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + return msm_fb->format; +} + +struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, + struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *bos[4] = {0}; + struct drm_framebuffer *fb; + int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); + + for (i = 0; i < n; i++) { + bos[i] = drm_gem_object_lookup(dev, file, + mode_cmd->handles[i]); + if (!bos[i]) { + ret = -ENXIO; + goto out_unref; + } + } + + fb = msm_framebuffer_init(dev, mode_cmd, bos); + if (IS_ERR(fb)) { + ret = PTR_ERR(fb); + goto out_unref; + } + + return fb; + +out_unref: + for (i = 0; i < n; i++) + drm_gem_object_unreference_unlocked(bos[i]); + return ERR_PTR(ret); +} + +struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + struct msm_framebuffer *msm_fb; + struct drm_framebuffer *fb = NULL; + const struct msm_format *format; + int ret, i, n; + unsigned int hsub, vsub; + + DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", + dev, mode_cmd, mode_cmd->width, mode_cmd->height, + (char *)&mode_cmd->pixel_format); + + n = drm_format_num_planes(mode_cmd->pixel_format); + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); + + format = kms->funcs->get_format(kms, mode_cmd->pixel_format); + if (!format) { + dev_err(dev->dev, "unsupported pixel format: %4.4s\n", + (char *)&mode_cmd->pixel_format); + ret = -EINVAL; + goto fail; + } + + msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL); + if (!msm_fb) { + ret = -ENOMEM; + goto fail; + } + + fb = &msm_fb->base; + + msm_fb->format = format; + + for (i = 0; i < n; i++) { + unsigned int width = mode_cmd->width / (i ? hsub : 1); + unsigned int height = mode_cmd->height / (i ? vsub : 1); + unsigned int min_size; + + min_size = (height - 1) * mode_cmd->pitches[i] + + width * drm_format_plane_cpp(mode_cmd->pixel_format, i) + + mode_cmd->offsets[i]; + + if (bos[i]->size < min_size) { + ret = -EINVAL; + goto fail; + } + + msm_fb->planes[i] = bos[i]; + } + + drm_helper_mode_fill_fb_struct(fb, mode_cmd); + + ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs); + if (ret) { + dev_err(dev->dev, "framebuffer init failed: %d\n", ret); + goto fail; + } + + DBG("create: FB ID: %d (%p)", fb->base.id, fb); + + return fb; + +fail: + if (fb) + msm_framebuffer_destroy(fb); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c new file mode 100644 index 000000000000..6c6d7d4c9b4e --- /dev/null +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "msm_drv.h" + +#include "drm_crtc.h" +#include "drm_fb_helper.h" + +/* + * fbdev funcs, to implement legacy fbdev interface on top of drm driver + */ + +#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base) + +struct msm_fbdev { + struct drm_fb_helper base; + struct drm_framebuffer *fb; + struct drm_gem_object *bo; +}; + +static struct fb_ops msm_fb_ops = { + .owner = THIS_MODULE, + + /* Note: to properly handle manual update displays, we wrap the + * basic fbdev ops which write to the framebuffer + */ + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static int msm_fbdev_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct msm_fbdev *fbdev = to_msm_fbdev(helper); + struct drm_device *dev = helper->dev; + struct drm_framebuffer *fb = NULL; + struct fb_info *fbi = NULL; + struct drm_mode_fb_cmd2 mode_cmd = {0}; + dma_addr_t paddr; + int ret, size; + + /* only doing ARGB32 since this is what is needed to alpha-blend + * with video overlays: + */ + sizes->surface_bpp = 32; + sizes->surface_depth = 32; + + DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, + sizes->surface_height, sizes->surface_bpp, + sizes->fb_width, sizes->fb_height); + + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + mode_cmd.pitches[0] = align_pitch( + mode_cmd.width, sizes->surface_bpp); + + /* allocate backing bo */ + size = mode_cmd.pitches[0] * mode_cmd.height; + DBG("allocating %d bytes for fb %d", size, dev->primary->index); + mutex_lock(&dev->struct_mutex); + fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(fbdev->bo)) { + ret = PTR_ERR(fbdev->bo); + fbdev->bo = NULL; + dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret); + goto fail; + } + + fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo); + if (IS_ERR(fb)) { + dev_err(dev->dev, "failed to allocate fb\n"); + /* note: if fb creation failed, we can't rely on fb destroy + * to unref the bo: + */ + drm_gem_object_unreference(fbdev->bo); + ret = PTR_ERR(fb); + goto fail; + } + + mutex_lock(&dev->struct_mutex); + + /* TODO implement our own fb_mmap so we don't need this: */ + msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); + + fbi = framebuffer_alloc(0, dev->dev); + if (!fbi) { + dev_err(dev->dev, "failed to allocate fb info\n"); + ret = -ENOMEM; + goto fail_unlock; + } + + DBG("fbi=%p, dev=%p", fbi, dev); + + fbdev->fb = fb; + helper->fb = fb; + helper->fbdev = fbi; + + fbi->par = helper; + fbi->flags = FBINFO_DEFAULT; + fbi->fbops = &msm_fb_ops; + + strcpy(fbi->fix.id, "msm"); + + ret = fb_alloc_cmap(&fbi->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto fail_unlock; + } + + drm_fb_helper_fill_fix(fbi, 
fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); + + dev->mode_config.fb_base = paddr; + + fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + fbi->screen_size = fbdev->bo->size; + fbi->fix.smem_start = paddr; + fbi->fix.smem_len = fbdev->bo->size; + + DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); + DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height); + + mutex_unlock(&dev->struct_mutex); + + return 0; + +fail_unlock: + mutex_unlock(&dev->struct_mutex); +fail: + + if (ret) { + if (fbi) + framebuffer_release(fbi); + if (fb) { + drm_framebuffer_unregister_private(fb); + drm_framebuffer_remove(fb); + } + } + + return ret; +} + +static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc, + u16 red, u16 green, u16 blue, int regno) +{ + DBG("fbdev: set gamma"); +} + +static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, int regno) +{ + DBG("fbdev: get gamma"); +} + +static struct drm_fb_helper_funcs msm_fb_helper_funcs = { + .gamma_set = msm_crtc_fb_gamma_set, + .gamma_get = msm_crtc_fb_gamma_get, + .fb_probe = msm_fbdev_create, +}; + +/* initialize fbdev helper */ +struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_fbdev *fbdev = NULL; + struct drm_fb_helper *helper; + int ret = 0; + + fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); + if (!fbdev) + goto fail; + + helper = &fbdev->base; + + helper->funcs = &msm_fb_helper_funcs; + + ret = drm_fb_helper_init(dev, helper, + priv->num_crtcs, priv->num_connectors); + if (ret) { + dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret); + goto fail; + } + + drm_fb_helper_single_add_all_connectors(helper); + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + drm_fb_helper_initial_config(helper, 32); + + priv->fbdev = helper; + + return helper; + +fail: + kfree(fbdev); + return NULL; +} + +void msm_fbdev_free(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_fb_helper *helper = priv->fbdev; + struct msm_fbdev *fbdev; + struct fb_info *fbi; + + DBG(); + + fbi = helper->fbdev; + + /* only cleanup framebuffer if it is present */ + if (fbi) { + unregister_framebuffer(fbi); + framebuffer_release(fbi); + } + + drm_fb_helper_fini(helper); + + fbdev = to_msm_fbdev(priv->fbdev); + + /* this will free the backing object */ + if (fbdev->fb) { + drm_framebuffer_unregister_private(fbdev->fb); + drm_framebuffer_remove(fbdev->fb); + } + + kfree(fbdev); + + priv->fbdev = NULL; +} diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c new file mode 100644 index 000000000000..a52e6cca8403 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -0,0 +1,521 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include + +#include "msm_drv.h" +#include "msm_gem.h" + + +/* called with dev->struct_mutex held */ +static struct page **get_pages(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + if (!msm_obj->pages) { + struct drm_device *dev = obj->dev; + struct page **p = drm_gem_get_pages(obj, 0); + int npages = obj->size >> PAGE_SHIFT; + + if (IS_ERR(p)) { + dev_err(dev->dev, "could not get pages: %ld\n", + PTR_ERR(p)); + return p; + } + + msm_obj->sgt = drm_prime_pages_to_sg(p, npages); + if (!msm_obj->sgt) { + dev_err(dev->dev, "failed to allocate sgt\n"); + return ERR_PTR(-ENOMEM); + } + + msm_obj->pages = p; + + /* For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent: + */ + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) + dma_map_sg(dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } + + return msm_obj->pages; +} + +static void put_pages(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + if (msm_obj->pages) { + /* For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent: + */ + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sg_free_table(msm_obj->sgt); + kfree(msm_obj->sgt); + + drm_gem_put_pages(obj, msm_obj->pages, true, false); + msm_obj->pages = NULL; + } +} + +int msm_gem_mmap_obj(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + if (msm_obj->flags & MSM_BO_WC) { + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + } else if (msm_obj->flags & MSM_BO_UNCACHED) { + vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); + } else { + /* + * Shunt off cached objs to shmem file so they have their own + * address_space (so unmap_mapping_range does what we want, + * in particular in the case of mmap'd dmabufs) + */ + fput(vma->vm_file); + get_file(obj->filp); + vma->vm_pgoff = 0; + vma->vm_file = obj->filp; + + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + } + + return 0; +} + +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) { + DBG("mmap failed: %d", ret); + return ret; + } + + return msm_gem_mmap_obj(vma->vm_private_data, vma); +} + +int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct drm_device *dev = obj->dev; + struct page **pages; + unsigned long pfn; + pgoff_t pgoff; + int ret; + + /* Make sure we don't parallel update on a fault, nor move or remove + * something from beneath our feet + */ + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + goto out; + + /* make sure we have pages attached now */ + pages = get_pages(obj); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out_unlock; + } + + /* We don't use vmf->pgoff since that has the fake offset: */ + pgoff = ((unsigned long)vmf->virtual_address - + vma->vm_start) >> PAGE_SHIFT; + + pfn = page_to_pfn(msm_obj->pages[pgoff]); + + VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + pfn, pfn << PAGE_SHIFT); + + ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + +out_unlock: + 
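+	/* drop struct_mutex and map the error code (0 on success) to a
+	 * VM_FAULT_* value in the switch below:
+	 */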
mutex_unlock(&dev->struct_mutex); +out: + switch (ret) { + case -EAGAIN: + set_need_resched(); + case 0: + case -ERESTARTSYS: + case -EINTR: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +/** get mmap offset */ +static uint64_t mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + int ret; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + /* Make it mmapable */ + ret = drm_gem_create_mmap_offset(obj); + + if (ret) { + dev_err(dev->dev, "could not allocate mmap offset\n"); + return 0; + } + + return drm_vma_node_offset_addr(&obj->vma_node); +} + +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) +{ + uint64_t offset; + mutex_lock(&obj->dev->struct_mutex); + offset = mmap_offset(obj); + mutex_unlock(&obj->dev->struct_mutex); + return offset; +} + +/* helpers for dealing w/ iommu: */ +static int map_range(struct iommu_domain *domain, unsigned int iova, + struct sg_table *sgt, unsigned int len, int prot) +{ + struct scatterlist *sg; + unsigned int da = iova; + unsigned int i, j; + int ret; + + if (!domain || !sgt) + return -EINVAL; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + u32 pa = sg_phys(sg) - sg->offset; + size_t bytes = sg->length + sg->offset; + + VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); + + ret = iommu_map(domain, da, pa, bytes, prot); + if (ret) + goto fail; + + da += bytes; + } + + return 0; + +fail: + da = iova; + + for_each_sg(sgt->sgl, sg, i, j) { + size_t bytes = sg->length + sg->offset; + iommu_unmap(domain, da, bytes); + da += bytes; + } + return ret; +} + +static void unmap_range(struct iommu_domain *domain, unsigned int iova, + struct sg_table *sgt, unsigned int len) +{ + struct scatterlist *sg; + unsigned int da = iova; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = sg->length + sg->offset; + size_t unmapped; + + unmapped = iommu_unmap(domain, da, bytes); + if (unmapped < bytes) + break; + + VERB("unmap[%d]: %08x(%x)", i, iova, bytes); + + BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); + + da += bytes; + } +} + +/* should be called under struct_mutex.. although it can be called + * from atomic context without struct_mutex to acquire an extra + * iova ref if you know one is already held. + * + * That means when I do eventually need to add support for unpinning + * the refcnt counter needs to be atomic_t. + */ +int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, + uint32_t *iova) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + int ret = 0; + + if (!msm_obj->domain[id].iova) { + struct msm_drm_private *priv = obj->dev->dev_private; + uint32_t offset = (uint32_t)mmap_offset(obj); + struct page **pages; + pages = get_pages(obj); + if (IS_ERR(pages)) + return PTR_ERR(pages); + // XXX ideally we would not map buffers writable when not needed... + ret = map_range(priv->iommus[id], offset, msm_obj->sgt, + obj->size, IOMMU_READ | IOMMU_WRITE); + msm_obj->domain[id].iova = offset; + } + + if (!ret) + *iova = msm_obj->domain[id].iova; + + return ret; +} + +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) +{ + int ret; + mutex_lock(&obj->dev->struct_mutex); + ret = msm_gem_get_iova_locked(obj, id, iova); + mutex_unlock(&obj->dev->struct_mutex); + return ret; +} + +void msm_gem_put_iova(struct drm_gem_object *obj, int id) +{ + // XXX TODO .. + // NOTE: probably don't need a _locked() version.. 
we wouldn't + // normally unmap here, but instead just mark that it could be + // unmapped (if the iova refcnt drops to zero), but then later + // if another _get_iova_locked() fails we can start unmapping + // things that are no longer needed.. +} + +int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + args->pitch = align_pitch(args->width, args->bpp); + args->size = PAGE_ALIGN(args->pitch * args->height); + return msm_gem_new_handle(dev, file, args->size, + MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); +} + +int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, + uint32_t handle) +{ + /* No special work needed, drop the reference and see what falls out */ + return drm_gem_handle_delete(file, handle); +} + +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset) +{ + struct drm_gem_object *obj; + int ret = 0; + + /* GEM does all our handle to object mapping */ + obj = drm_gem_object_lookup(dev, file, handle); + if (obj == NULL) { + ret = -ENOENT; + goto fail; + } + + *offset = msm_gem_mmap_offset(obj); + + drm_gem_object_unreference_unlocked(obj); + +fail: + return ret; +} + +void *msm_gem_vaddr_locked(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + if (!msm_obj->vaddr) { + struct page **pages = get_pages(obj); + if (IS_ERR(pages)) + return ERR_CAST(pages); + msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, + VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + } + return msm_obj->vaddr; +} + +void *msm_gem_vaddr(struct drm_gem_object *obj) +{ + void *ret; + mutex_lock(&obj->dev->struct_mutex); + ret = msm_gem_vaddr_locked(obj); + mutex_unlock(&obj->dev->struct_mutex); + return ret; +} + +int msm_gem_queue_inactive_work(struct drm_gem_object *obj, + struct work_struct *work) +{ + struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = dev->dev_private; + + /* just a place-holder until we have gpu.. 
*/ + queue_work(priv->wq, work); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) +{ + struct drm_device *dev = obj->dev; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + uint64_t off = drm_vma_node_start(&obj->vma_node); + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + seq_printf(m, "%08x: %2d (%2d) %08llx %p %d\n", + msm_obj->flags, obj->name, obj->refcount.refcount.counter, + off, msm_obj->vaddr, obj->size); +} + +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) +{ + struct msm_gem_object *msm_obj; + int count = 0; + size_t size = 0; + + list_for_each_entry(msm_obj, list, mm_list) { + struct drm_gem_object *obj = &msm_obj->base; + seq_printf(m, " "); + msm_gem_describe(obj, m); + count++; + size += obj->size; + } + + seq_printf(m, "Total %d objects, %zu bytes\n", count, size); +} +#endif + +void msm_gem_free_object(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + int id; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + list_del(&msm_obj->mm_list); + + for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { + if (msm_obj->domain[id].iova) { + struct msm_drm_private *priv = obj->dev->dev_private; + uint32_t offset = (uint32_t)mmap_offset(obj); + unmap_range(priv->iommus[id], offset, + msm_obj->sgt, obj->size); + } + } + + drm_gem_free_mmap_offset(obj); + + if (msm_obj->vaddr) + vunmap(msm_obj->vaddr); + + put_pages(obj); + + drm_gem_object_release(obj); + + kfree(msm_obj); +} + +/* convenience method to construct a GEM buffer object, and userspace handle */ +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, + uint32_t size, uint32_t flags, uint32_t *handle) +{ + struct drm_gem_object *obj; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + obj = msm_gem_new(dev, size, flags); + + mutex_unlock(&dev->struct_mutex); + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + ret = drm_gem_handle_create(file, obj, handle); + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +struct drm_gem_object *msm_gem_new(struct drm_device *dev, + uint32_t size, uint32_t flags) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_gem_object *msm_obj; + struct drm_gem_object *obj = NULL; + int ret; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + size = PAGE_ALIGN(size); + + switch (flags & MSM_BO_CACHE_MASK) { + case MSM_BO_UNCACHED: + case MSM_BO_CACHED: + case MSM_BO_WC: + break; + default: + dev_err(dev->dev, "invalid cache flag: %x\n", + (flags & MSM_BO_CACHE_MASK)); + ret = -EINVAL; + goto fail; + } + + msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); + if (!msm_obj) { + ret = -ENOMEM; + goto fail; + } + + obj = &msm_obj->base; + + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto fail; + + msm_obj->flags = flags; + + + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + + return obj; + +fail: + if (obj) + drm_gem_object_unreference_unlocked(obj); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h new file mode 100644 index 000000000000..fcafd1965151 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as 
published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MSM_GEM_H__ +#define __MSM_GEM_H__ + +#include "msm_drv.h" + +struct msm_gem_object { + struct drm_gem_object base; + + uint32_t flags; + + struct list_head mm_list; + + struct page **pages; + struct sg_table *sgt; + void *vaddr; + + struct { + // XXX + uint32_t iova; + } domain[NUM_DOMAINS]; +}; +#define to_msm_bo(x) container_of(x, struct msm_gem_object, base) + +#endif /* __MSM_GEM_H__ */ -- cgit v1.2.3 From 902e6eb851a78ad9e3db006c1e1df71841f633e2 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 19 Jul 2013 12:52:29 -0400 Subject: drm/msm: add register definitions for gpu Generated from rnndb files in: https://github.com/freedreno/envytools Keep this split out as a separate commit to make it easier to review the actual driver. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 1438 ++++++++++++++++ drivers/gpu/drm/msm/adreno/a3xx.xml.h | 2193 ++++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 432 +++++ drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 254 +++ 4 files changed, 4317 insertions(+) create mode 100644 drivers/gpu/drm/msm/adreno/a2xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a3xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_common.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h new file mode 100644 index 000000000000..35463864b959 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -0,0 +1,1438 @@ +#ifndef A2XX_XML +#define A2XX_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) +- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a2xx_rb_dither_type { + DITHER_PIXEL = 0, + DITHER_SUBPIXEL = 1, +}; + +enum a2xx_colorformatx { + COLORX_4_4_4_4 = 0, + COLORX_1_5_5_5 = 1, + COLORX_5_6_5 = 2, + COLORX_8 = 3, + COLORX_8_8 = 4, + COLORX_8_8_8_8 = 5, + COLORX_S8_8_8_8 = 6, + COLORX_16_FLOAT = 7, + COLORX_16_16_FLOAT = 8, + COLORX_16_16_16_16_FLOAT = 9, + COLORX_32_FLOAT = 10, + COLORX_32_32_FLOAT = 11, + COLORX_32_32_32_32_FLOAT = 12, + COLORX_2_3_3 = 13, + COLORX_8_8_8 = 14, +}; + +enum a2xx_sq_surfaceformat { + FMT_1_REVERSE = 0, + FMT_1 = 1, + FMT_8 = 2, + FMT_1_5_5_5 = 3, + FMT_5_6_5 = 4, + FMT_6_5_5 = 5, + FMT_8_8_8_8 = 6, + FMT_2_10_10_10 = 7, + FMT_8_A = 8, + FMT_8_B = 9, + FMT_8_8 = 10, + FMT_Cr_Y1_Cb_Y0 = 11, + FMT_Y1_Cr_Y0_Cb = 12, + FMT_5_5_5_1 = 13, + FMT_8_8_8_8_A = 14, + FMT_4_4_4_4 = 15, + FMT_10_11_11 = 16, + FMT_11_11_10 = 17, + FMT_DXT1 = 18, + FMT_DXT2_3 = 19, + FMT_DXT4_5 = 20, + FMT_24_8 = 22, + FMT_24_8_FLOAT = 23, + FMT_16 = 24, + FMT_16_16 = 25, + FMT_16_16_16_16 = 26, + FMT_16_EXPAND = 27, + FMT_16_16_EXPAND = 28, + FMT_16_16_16_16_EXPAND = 29, + FMT_16_FLOAT = 30, + FMT_16_16_FLOAT = 31, + FMT_16_16_16_16_FLOAT = 32, + FMT_32 = 33, + FMT_32_32 = 34, + FMT_32_32_32_32 = 35, + FMT_32_FLOAT = 36, + FMT_32_32_FLOAT = 37, + FMT_32_32_32_32_FLOAT = 38, + FMT_32_AS_8 = 39, + FMT_32_AS_8_8 = 40, + FMT_16_MPEG = 41, + FMT_16_16_MPEG = 42, + FMT_8_INTERLACED = 43, + FMT_32_AS_8_INTERLACED = 44, + FMT_32_AS_8_8_INTERLACED = 45, + FMT_16_INTERLACED = 46, + FMT_16_MPEG_INTERLACED = 47, + FMT_16_16_MPEG_INTERLACED = 48, + FMT_DXN = 49, + FMT_8_8_8_8_AS_16_16_16_16 = 50, + FMT_DXT1_AS_16_16_16_16 = 51, + FMT_DXT2_3_AS_16_16_16_16 = 52, + FMT_DXT4_5_AS_16_16_16_16 = 53, + FMT_2_10_10_10_AS_16_16_16_16 = 54, + FMT_10_11_11_AS_16_16_16_16 = 55, + FMT_11_11_10_AS_16_16_16_16 = 56, + FMT_32_32_32_FLOAT = 57, + FMT_DXT3A = 58, + FMT_DXT5A = 59, + FMT_CTX1 = 60, + FMT_DXT3A_AS_1_1_1_1 = 61, +}; + +enum a2xx_sq_ps_vtx_mode { + POSITION_1_VECTOR = 0, + POSITION_2_VECTORS_UNUSED = 1, + POSITION_2_VECTORS_SPRITE = 2, + POSITION_2_VECTORS_EDGE = 3, + POSITION_2_VECTORS_KILL = 4, + POSITION_2_VECTORS_SPRITE_KILL = 5, + POSITION_2_VECTORS_EDGE_KILL = 6, + MULTIPASS = 7, +}; + +enum a2xx_sq_sample_cntl { + CENTROIDS_ONLY = 0, + CENTERS_ONLY = 1, + CENTROIDS_AND_CENTERS = 2, +}; + +enum a2xx_dx_clip_space { + DXCLIP_OPENGL = 0, + DXCLIP_DIRECTX = 1, +}; + +enum a2xx_pa_su_sc_polymode { + POLY_DISABLED = 0, + POLY_DUALMODE = 1, +}; + +enum a2xx_rb_edram_mode { + EDRAM_NOP = 0, + COLOR_DEPTH = 4, + DEPTH_ONLY = 5, + EDRAM_COPY = 6, +}; + +enum a2xx_pa_sc_pattern_bit_order { + LITTLE = 0, + BIG = 1, +}; + +enum a2xx_pa_sc_auto_reset_cntl { + NEVER = 0, + EACH_PRIMITIVE = 1, + EACH_PACKET = 2, +}; + +enum a2xx_pa_pixcenter { + PIXCENTER_D3D = 0, + PIXCENTER_OGL = 1, +}; + +enum a2xx_pa_roundmode { + TRUNCATE = 0, + ROUND = 1, + ROUNDTOEVEN = 2, + ROUNDTOODD = 3, +}; + +enum a2xx_pa_quantmode { + ONE_SIXTEENTH = 0, + ONE_EIGTH = 1, + ONE_QUARTER = 2, + ONE_HALF = 3, + ONE = 4, +}; + +enum a2xx_rb_copy_sample_select { + SAMPLE_0 = 0, + SAMPLE_1 = 1, + SAMPLE_2 = 2, + SAMPLE_3 = 3, + SAMPLE_01 = 4, + SAMPLE_23 = 5, + SAMPLE_0123 = 6, +}; + +enum sq_tex_clamp { + SQ_TEX_WRAP = 0, + SQ_TEX_MIRROR = 1, + SQ_TEX_CLAMP_LAST_TEXEL = 2, + SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3, + SQ_TEX_CLAMP_HALF_BORDER = 4, + SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5, + SQ_TEX_CLAMP_BORDER = 6, + SQ_TEX_MIRROR_ONCE_BORDER = 7, +}; + +enum sq_tex_swiz { + SQ_TEX_X = 0, + SQ_TEX_Y = 1, + SQ_TEX_Z = 2, + SQ_TEX_W = 3, + SQ_TEX_ZERO = 4, + SQ_TEX_ONE = 5, 
+}; + +enum sq_tex_filter { + SQ_TEX_FILTER_POINT = 0, + SQ_TEX_FILTER_BILINEAR = 1, + SQ_TEX_FILTER_BICUBIC = 2, +}; + +#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001 + +#define REG_A2XX_RBBM_CNTL 0x0000003b + +#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c + +#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0 + +#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 + +#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 + +#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 + +#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398 + +#define REG_A2XX_RBBM_DEBUG 0x0000039b + +#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c + +#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d + +#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0 + +#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1 + +#define REG_A2XX_RBBM_READ_ERROR 0x000003b3 + +#define REG_A2XX_RBBM_INT_CNTL 0x000003b4 + +#define REG_A2XX_RBBM_INT_STATUS 0x000003b5 + +#define REG_A2XX_RBBM_INT_ACK 0x000003b6 + +#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7 + +#define REG_A2XX_RBBM_PERIPHID1 0x000003f9 + +#define REG_A2XX_RBBM_PERIPHID2 0x000003fa + +#define REG_A2XX_CP_PERFMON_CNTL 0x00000444 + +#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445 + +#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446 + +#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 + +#define REG_A2XX_CP_ST_BASE 0x0000044d + +#define REG_A2XX_CP_ST_BUFSZ 0x0000044e + +#define REG_A2XX_CP_IB1_BASE 0x00000458 + +#define REG_A2XX_CP_IB1_BUFSZ 0x00000459 + +#define REG_A2XX_CP_IB2_BASE 0x0000045a + +#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b + +#define REG_A2XX_CP_STAT 0x0000047f + +#define REG_A2XX_RBBM_STATUS 0x000005d0 +#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f +#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 +static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val) +{ + return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK; +} +#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020 +#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100 +#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200 +#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400 +#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800 +#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000 +#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000 +#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000 +#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000 +#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000 +#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000 +#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000 +#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000 +#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000 +#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000 +#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000 +#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000 +#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000 +#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000 + +#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01 +#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f +#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0 +static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val) +{ + return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK; +} +#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 +#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5 +static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val) +{ + return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK; +} + +static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; } + +static inline uint32_t 
REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; } + +static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; } + +static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } + +#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38 + +#define REG_A2XX_PC_DEBUG_DATA 0x00000c39 + +#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44 + +#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80 + +#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80 + +#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81 + +#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81 + +#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86 + +#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00 + +#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01 + +#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02 + +#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05 + +#define REG_A2XX_SQ_INT_CNTL 0x00000d34 + +#define REG_A2XX_SQ_INT_STATUS 0x00000d35 + +#define REG_A2XX_SQ_INT_ACK 0x00000d36 + +#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae + +#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf + +#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0 + +#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1 + +#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2 + +#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3 + +#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4 + +#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5 + +#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6 + +#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7 + +#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8 + +#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9 + +#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba + +#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb + +#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc + +#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd + +#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe + +#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf + +#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0 + +#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1 + +#define REG_A2XX_TC_CNTL_STATUS 0x00000e00 +#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001 + +#define REG_A2XX_TP0_CHICKEN 0x00000e1e + +#define REG_A2XX_RB_BC_CONTROL 0x00000f01 +#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001 +#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006 +#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1 +static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val) +{ + return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK; +} +#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008 +#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010 +#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020 +#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040 +#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080 +#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00 +#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8 +static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val) +{ + return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK; +} +#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000 +#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000 +#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000 +#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000 +#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000 +#define 
A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18 +static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val) +{ + return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK; +} +#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000 +#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000 +#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23 +static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val) +{ + return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK; +} +#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000 +#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27 +static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val) +{ + return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK; +} +#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000 +#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000 +#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000 + +#define REG_A2XX_RB_EDRAM_INFO 0x00000f02 + +#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26 + +#define REG_A2XX_RB_DEBUG_DATA 0x00000f27 + +#define REG_A2XX_RB_SURFACE_INFO 0x00002000 + +#define REG_A2XX_RB_COLOR_INFO 0x00002001 +#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f +#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0 +static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val) +{ + return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK; +} +#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030 +#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4 +static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK; +} +#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040 +#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180 +#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7 +static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK; +} +#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600 +#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9 +static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK; +} +#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000 +#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12 +static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val) +{ + return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK; +} + +#define REG_A2XX_RB_DEPTH_INFO 0x00002002 +#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 +#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) +{ + return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; +} +#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000 +#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12 +static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) +{ + return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; +} + +#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005 + +#define REG_A2XX_COHER_DEST_BASE_0 0x00002006 + +#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 
0x80000000 +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK; +} +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK; +} + +#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK; +} +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK; +} + +#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080 +#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK; +} +#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK; +} +#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000 + +#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK; +} +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK; +} + +#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK; +} +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK; +} + +#define REG_A2XX_UNKNOWN_2010 0x00002010 + +#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100 + +#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101 + +#define REG_A2XX_VGT_INDX_OFFSET 0x00002102 + +#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103 + +#define REG_A2XX_RB_COLOR_MASK 0x00002104 +#define 
A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001 +#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002 +#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004 +#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008 + +#define REG_A2XX_RB_BLEND_RED 0x00002105 + +#define REG_A2XX_RB_BLEND_GREEN 0x00002106 + +#define REG_A2XX_RB_BLEND_BLUE 0x00002107 + +#define REG_A2XX_RB_BLEND_ALPHA 0x00002108 + +#define REG_A2XX_RB_FOG_COLOR 0x00002109 + +#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c +#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff +#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; +} +#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 +#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; +} +#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 +#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; +} + +#define REG_A2XX_RB_STENCILREFMASK 0x0000210d +#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff +#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK; +} +#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 +#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK; +} +#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 +#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; +} + +#define REG_A2XX_RB_ALPHA_REF 0x0000210e + +#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f +#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110 +#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111 +#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112 +#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val) +{ + return 
((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113 +#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114 +#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK; +} + +#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180 +#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff +#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00 +#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000 +#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000 +#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000 +#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000 +#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000 + +#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181 +#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001 +#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002 +#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c +#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2 +static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val) +{ + return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK; +} +#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00 +#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8 +static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) +{ + return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK; +} +#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000 +#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000 +#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000 + +#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182 + +#define REG_A2XX_SQ_WRAPPING_0 0x00002183 + +#define 
REG_A2XX_SQ_WRAPPING_1 0x00002184 + +#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6 + +#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 + +#define REG_A2XX_RB_DEPTHCONTROL 0x00002200 +#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 +#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 +#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004 +#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008 +#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070 +#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4 +static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK; +} +#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800 +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000 +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000 +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000 +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000 +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000 +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK; +} + +#define REG_A2XX_RB_BLEND_CONTROL 0x00002201 +#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f +#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << 
A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0 +#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK; +} +#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00 +#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000 +#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000 + +#define REG_A2XX_RB_COLORCONTROL 0x00002202 +#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007 +#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010 +#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020 +#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040 +#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080 +#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00 +#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK; +} +#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000 +#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12 +static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK; +} +#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000 +#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14 +static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val) +{ + return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK; +} +#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000 +#define 
A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK; +} + +#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK; +} + +#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204 +#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 +#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000 +#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000 +#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19 +static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val) +{ + return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK; +} +#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000 +#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000 +#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000 +#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000 +#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000 + +#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205 +#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001 +#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004 +#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018 +#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK; 
+} +#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0 +#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK; +} +#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700 +#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK; +} +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800 +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000 +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000 +#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000 +#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000 +#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000 +#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000 +#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000 +#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000 +#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000 +#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000 +#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000 +#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000 +#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000 + +#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206 +#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001 +#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020 +#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100 +#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200 +#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400 +#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800 + +#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK; +} + +#define REG_A2XX_RB_MODECONTROL 0x00002208 +#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007 +#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0 +static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val) +{ + return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & 
A2XX_RB_MODECONTROL_EDRAM_MODE__MASK; +} + +#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209 + +#define REG_A2XX_RB_SAMPLE_POS 0x0000220a + +#define REG_A2XX_CLEAR_COLOR 0x0000220b +#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff +#define A2XX_CLEAR_COLOR_RED__SHIFT 0 +static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK; +} +#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00 +#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8 +static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK; +} +#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000 +#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16 +static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK; +} +#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000 +#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24 +static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK; +} + +#define REG_A2XX_A220_GRAS_CONTROL 0x00002210 + +#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280 +#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff +#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0 +static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val) +{ + return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK; +} +#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000 +#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16 +static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val) +{ + return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK; +} + +#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281 +#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff +#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0 +static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val) +{ + return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK; +} +#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000 +#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16 +static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val) +{ + return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK; +} + +#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282 +#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff +#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0 +static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val) +{ + return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK; +} + +#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283 +#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff +#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000 +#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000 +#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28 +static inline uint32_t 
A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000 +#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK; +} + +#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293 + +#define REG_A2XX_VGT_ENHANCE 0x00002294 + +#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300 +#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff +#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0 +static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK; +} +#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100 +#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200 +#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400 + +#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301 + +#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302 +#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001 +#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK; +} +#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006 +#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK; +} +#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380 +#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK; +} + +#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303 +#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304 +#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305 +#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306 +#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK; +} + +#define REG_A2XX_SQ_VS_CONST 0x00002307 +#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff +#define A2XX_SQ_VS_CONST_BASE__SHIFT 0 +static inline uint32_t 
A2XX_SQ_VS_CONST_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK; +} +#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000 +#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK; +} + +#define REG_A2XX_SQ_PS_CONST 0x00002308 +#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff +#define A2XX_SQ_PS_CONST_BASE__SHIFT 0 +static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK; +} +#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000 +#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK; +} + +#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309 + +#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a + +#define REG_A2XX_PA_SC_AA_MASK 0x00002312 + +#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316 + +#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317 + +#define REG_A2XX_RB_COPY_CONTROL 0x00002318 +#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007 +#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val) +{ + return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK; +} +#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008 +#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0 +#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4 +static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK; +} + +#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319 + +#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a +#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff +#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val) +{ + return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK; +} + +#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b +#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007 +#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008 +#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0 +#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 +#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 +#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000 
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000 + +#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c +#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff +#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK; +} +#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000 +#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13 +static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK; +} + +#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d + +#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324 + +#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326 + +#define REG_A2XX_A225_GRAS_UCP0X 0x00002340 + +#define REG_A2XX_A225_GRAS_UCP5W 0x00002357 + +#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360 + +#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380 + +#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383 + +#define REG_A2XX_SQ_CONSTANT_0 0x00004000 + +#define REG_A2XX_SQ_FETCH_0 0x00004800 + +#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900 + +#define REG_A2XX_SQ_CF_LOOP 0x00004908 + +#define REG_A2XX_COHER_SIZE_PM4 0x00000a29 + +#define REG_A2XX_COHER_BASE_PM4 0x00000a2a + +#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b + +#define REG_A2XX_SQ_TEX_0 0x00000000 +#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00 +#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK; +} +#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000 +#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK; +} +#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000 +#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK; +} +#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000 +#define A2XX_SQ_TEX_0_PITCH__SHIFT 22 +static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val) +{ + return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK; +} + +#define REG_A2XX_SQ_TEX_1 0x00000001 + +#define REG_A2XX_SQ_TEX_2 0x00000002 +#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff +#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK; +} +#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000 +#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13 +static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK; +} + +#define REG_A2XX_SQ_TEX_3 0x00000003 +#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e +#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & 
A2XX_SQ_TEX_3_SWIZ_X__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070 +#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380 +#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00 +#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK; +} +#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000 +#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19 +static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK; +} +#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000 +#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21 +static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK; +} + + +#endif /* A2XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h new file mode 100644 index 000000000000..d183516067b4 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -0,0 +1,2193 @@ +#ifndef A3XX_XML +#define A3XX_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) +- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum a3xx_render_mode { + RB_RENDERING_PASS = 0, + RB_TILING_PASS = 1, + RB_RESOLVE_PASS = 2, +}; + +enum a3xx_tile_mode { + LINEAR = 0, + TILE_32X32 = 2, +}; + +enum a3xx_threadmode { + MULTI = 0, + SINGLE = 1, +}; + +enum a3xx_instrbuffermode { + BUFFER = 1, +}; + +enum a3xx_threadsize { + TWO_QUADS = 0, + FOUR_QUADS = 1, +}; + +enum a3xx_state_block_id { + HLSQ_BLOCK_ID_TP_TEX = 2, + HLSQ_BLOCK_ID_TP_MIPMAP = 3, + HLSQ_BLOCK_ID_SP_VS = 4, + HLSQ_BLOCK_ID_SP_FS = 6, +}; + +enum a3xx_cache_opcode { + INVALIDATE = 1, +}; + +enum a3xx_vtx_fmt { + VFMT_FLOAT_32 = 0, + VFMT_FLOAT_32_32 = 1, + VFMT_FLOAT_32_32_32 = 2, + VFMT_FLOAT_32_32_32_32 = 3, + VFMT_FLOAT_16 = 4, + VFMT_FLOAT_16_16 = 5, + VFMT_FLOAT_16_16_16 = 6, + VFMT_FLOAT_16_16_16_16 = 7, + VFMT_FIXED_32 = 8, + VFMT_FIXED_32_32 = 9, + VFMT_FIXED_32_32_32 = 10, + VFMT_FIXED_32_32_32_32 = 11, + VFMT_SHORT_16 = 16, + VFMT_SHORT_16_16 = 17, + VFMT_SHORT_16_16_16 = 18, + VFMT_SHORT_16_16_16_16 = 19, + VFMT_USHORT_16 = 20, + VFMT_USHORT_16_16 = 21, + VFMT_USHORT_16_16_16 = 22, + VFMT_USHORT_16_16_16_16 = 23, + VFMT_NORM_SHORT_16 = 24, + VFMT_NORM_SHORT_16_16 = 25, + VFMT_NORM_SHORT_16_16_16 = 26, + VFMT_NORM_SHORT_16_16_16_16 = 27, + VFMT_NORM_USHORT_16 = 28, + VFMT_NORM_USHORT_16_16 = 29, + VFMT_NORM_USHORT_16_16_16 = 30, + VFMT_NORM_USHORT_16_16_16_16 = 31, + VFMT_UBYTE_8 = 40, + VFMT_UBYTE_8_8 = 41, + VFMT_UBYTE_8_8_8 = 42, + VFMT_UBYTE_8_8_8_8 = 43, + VFMT_NORM_UBYTE_8 = 44, + VFMT_NORM_UBYTE_8_8 = 45, + VFMT_NORM_UBYTE_8_8_8 = 46, + VFMT_NORM_UBYTE_8_8_8_8 = 47, + VFMT_BYTE_8 = 48, + VFMT_BYTE_8_8 = 49, + VFMT_BYTE_8_8_8 = 50, + VFMT_BYTE_8_8_8_8 = 51, + VFMT_NORM_BYTE_8 = 52, + VFMT_NORM_BYTE_8_8 = 53, + VFMT_NORM_BYTE_8_8_8 = 54, + VFMT_NORM_BYTE_8_8_8_8 = 55, + VFMT_UINT_10_10_10_2 = 60, + VFMT_NORM_UINT_10_10_10_2 = 61, + VFMT_INT_10_10_10_2 = 62, + VFMT_NORM_INT_10_10_10_2 = 63, +}; + +enum a3xx_tex_fmt { + TFMT_NORM_USHORT_565 = 4, + TFMT_NORM_USHORT_5551 = 6, + TFMT_NORM_USHORT_4444 = 7, + TFMT_NORM_UINT_X8Z24 = 10, + TFMT_NORM_UINT_NV12_UV_TILED = 17, + TFMT_NORM_UINT_NV12_Y_TILED = 19, + TFMT_NORM_UINT_NV12_UV = 21, + TFMT_NORM_UINT_NV12_Y = 23, + TFMT_NORM_UINT_I420_Y = 24, + TFMT_NORM_UINT_I420_U = 26, + TFMT_NORM_UINT_I420_V = 27, + TFMT_NORM_UINT_2_10_10_10 = 41, + TFMT_NORM_UINT_A8 = 44, + TFMT_NORM_UINT_L8_A8 = 47, + TFMT_NORM_UINT_8 = 48, + TFMT_NORM_UINT_8_8 = 49, + TFMT_NORM_UINT_8_8_8 = 50, + TFMT_NORM_UINT_8_8_8_8 = 51, + TFMT_FLOAT_16 = 64, + TFMT_FLOAT_16_16 = 65, + TFMT_FLOAT_16_16_16_16 = 67, + TFMT_FLOAT_32 = 84, + TFMT_FLOAT_32_32 = 85, + TFMT_FLOAT_32_32_32_32 = 87, +}; + +enum a3xx_tex_fetchsize { + TFETCH_DISABLE = 0, + TFETCH_1_BYTE = 1, + TFETCH_2_BYTE = 2, + TFETCH_4_BYTE = 3, + TFETCH_8_BYTE = 4, + TFETCH_16_BYTE = 5, +}; + +enum a3xx_color_fmt { + RB_R8G8B8_UNORM = 4, + RB_R8G8B8A8_UNORM = 8, + RB_Z16_UNORM = 12, + RB_A8_UNORM = 20, +}; + +enum a3xx_color_swap { + WZYX = 0, + WXYZ = 1, + ZYXW = 2, + XYZW = 3, +}; + +enum a3xx_msaa_samples { + MSAA_ONE = 0, + MSAA_TWO = 1, + MSAA_FOUR = 2, +}; + +enum a3xx_sp_perfcounter_select { + SP_FS_CFLOW_INSTRUCTIONS = 12, + SP_FS_FULL_ALU_INSTRUCTIONS = 14, + SP0_ICL1_MISSES = 26, + SP_ALU_ACTIVE_CYCLES = 29, +}; + +enum adreno_rb_copy_control_mode { + RB_COPY_RESOLVE = 1, + 
RB_COPY_DEPTH_STENCIL = 5, +}; + +enum a3xx_tex_filter { + A3XX_TEX_NEAREST = 0, + A3XX_TEX_LINEAR = 1, +}; + +enum a3xx_tex_clamp { + A3XX_TEX_REPEAT = 0, + A3XX_TEX_CLAMP_TO_EDGE = 1, + A3XX_TEX_MIRROR_REPEAT = 2, + A3XX_TEX_CLAMP_NONE = 3, +}; + +enum a3xx_tex_swiz { + A3XX_TEX_X = 0, + A3XX_TEX_Y = 1, + A3XX_TEX_Z = 2, + A3XX_TEX_W = 3, + A3XX_TEX_ZERO = 4, + A3XX_TEX_ONE = 5, +}; + +enum a3xx_tex_type { + A3XX_TEX_1D = 0, + A3XX_TEX_2D = 1, + A3XX_TEX_CUBE = 2, + A3XX_TEX_3D = 3, +}; + +#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 +#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 +#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004 +#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 +#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 +#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020 +#define A3XX_INT0_VFD_ERROR 0x00000040 +#define A3XX_INT0_CP_SW_INT 0x00000080 +#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100 +#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200 +#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400 +#define A3XX_INT0_CP_HW_FAULT 0x00000800 +#define A3XX_INT0_CP_DMA 0x00001000 +#define A3XX_INT0_CP_IB2_INT 0x00002000 +#define A3XX_INT0_CP_IB1_INT 0x00004000 +#define A3XX_INT0_CP_RB_INT 0x00008000 +#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000 +#define A3XX_INT0_CP_RB_DONE_TS 0x00020000 +#define A3XX_INT0_CP_VS_DONE_TS 0x00040000 +#define A3XX_INT0_CP_PS_DONE_TS 0x00080000 +#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000 +#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000 +#define A3XX_INT0_MISC_HANG_DETECT 0x01000000 +#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000 +#define REG_A3XX_RBBM_HW_VERSION 0x00000000 + +#define REG_A3XX_RBBM_HW_RELEASE 0x00000001 + +#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002 + +#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010 + +#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012 + +#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018 + +#define REG_A3XX_RBBM_AHB_CTL0 0x00000020 + +#define REG_A3XX_RBBM_AHB_CTL1 0x00000021 + +#define REG_A3XX_RBBM_AHB_CMD 0x00000022 + +#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027 + +#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e + +#define REG_A3XX_RBBM_STATUS 0x00000030 +#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001 +#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 +#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 +#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000 +#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000 +#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000 +#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000 +#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000 +#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 +#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 +#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000 +#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000 +#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000 +#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000 +#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000 +#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000 +#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000 +#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000 +#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 +#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 +#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 + +#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 + +#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a + +#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 + +#define 
REG_A3XX_RBBM_INT_0_MASK 0x00000063 + +#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064 + +#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085 + +#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086 + +#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087 + +#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088 + +#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090 + +#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095 + +#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096 + +#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097 + +#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098 + +#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099 + +#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a + +#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b + +#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c + +#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d + +#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e + +#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f + +#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0 + +#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad + +#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae + +#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af + +#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0 + +#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1 + +#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2 + +#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3 + +#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4 + +#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5 + +#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6 + +#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7 + +#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8 + +#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba + +#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb + +#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc + +#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd + +#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be + +#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf + +#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5 + +#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6 + +#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7 + +#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 
0x000000c8 + +#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9 + +#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca + +#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb + +#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc + +#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd + +#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce + +#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf + +#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0 + +#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1 + +#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2 + +#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3 + +#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4 + +#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5 + +#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6 + +#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7 + +#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8 + +#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9 + +#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da + +#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db + +#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc + +#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd + +#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de + +#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df + +#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0 + +#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1 + +#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2 + +#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3 + +#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4 + +#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5 + +#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea + +#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb + +#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec + +#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed + +#define REG_A3XX_RBBM_RBBM_CTL 0x00000100 + +#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111 + +#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112 + +#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9 + +#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca + +#define REG_A3XX_CP_ROQ_ADDR 0x000001cc + +#define REG_A3XX_CP_ROQ_DATA 0x000001cd + +#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1 + +#define REG_A3XX_CP_MERCIU_DATA 0x000001d2 + +#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3 + +#define REG_A3XX_CP_MEQ_ADDR 0x000001da + +#define REG_A3XX_CP_MEQ_DATA 0x000001db + +#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445 + +#define REG_A3XX_CP_HW_FAULT 0x0000045c + +#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e + +#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f + +static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; } + +static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; } + +#define REG_A3XX_CP_AHB_FAULT 0x0000054d + +#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040 +#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000 +#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000 +#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 +#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 +#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 + +#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 +#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff +#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK; +} +#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00 +#define 
A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10 +static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048 +#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049 +#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a +#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b +#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c +#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d +#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK; +} + +#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068 + +#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069 + +#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c +#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff +#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) +{ + return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; +} + +#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d +#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff +#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) +{ + return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; +} + +#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 +#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 +#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 +#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc +#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2 +static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val) +{ + return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; +} +#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 + +#define REG_A3XX_GRAS_SC_CONTROL 0x00002072 +#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 
0x000000f0 +#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4 +static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; +} +#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00 +#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8 +static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; +} +#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 +#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 +static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; +} + +#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; +} +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; +} +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; +} +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; +} +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t 
A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; +} + +#define REG_A3XX_RB_MODE_CONTROL 0x000020c0 +#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080 +#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700 +#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8 +static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK; +} +#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000 +#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 + +#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 +#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 +#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 +static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK; +} +#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 +#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 +static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK; +} + +#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2 +#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400 +#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000 +#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12 +static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK; +} +#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000 +#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16 +static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val) +{ + return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK; +} + +#define REG_A3XX_UNKNOWN_20C3 0x000020c3 + +static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; } + +static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; } +#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 +#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010 +#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020 +#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 +#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK; +} +#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000 +#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12 +static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK; +} +#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 +#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 +static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; } +#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 
0x0000003f +#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val) +{ + return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 +#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val) +{ + return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000 +#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; } +#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0 +#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4 +static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; } +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000 + +#define REG_A3XX_RB_BLEND_RED 0x000020e4 +#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_RED_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK; +} +#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_GREEN 0x000020e5 +#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK; +} +#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_BLUE 0x000020e6 +#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK; +} +#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7 +#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK; +} +#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; +} + +#define REG_A3XX_UNKNOWN_20E8 0x000020e8 + +#define REG_A3XX_UNKNOWN_20E9 0x000020e9 + +#define REG_A3XX_UNKNOWN_20EA 0x000020ea + +#define REG_A3XX_UNKNOWN_20EB 0x000020eb + +#define REG_A3XX_RB_COPY_CONTROL 0x000020ec +#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 +#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; +} +#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 +#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4 +static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; +} +#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00 +#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10 +static inline uint32_t 
A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) +{ + return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed +#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0 +#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4 +static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee +#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff +#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef +#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003 +#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc +#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 +#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 +#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000 +#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK; +} + +#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 +#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002 +#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 +#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008 +#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 +#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 +static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK; +} +#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 +#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 + +#define REG_A3XX_UNKNOWN_2101 0x00002101 + +#define REG_A3XX_RB_DEPTH_INFO 0x00002102 +#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 +#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) +{ + return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; +} +#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800 +#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11 +static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) +{ + return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & 
A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; +} + +#define REG_A3XX_RB_DEPTH_PITCH 0x00002103 +#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff +#define A3XX_RB_DEPTH_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val) +{ + return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK; +} + +#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104 +#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 +#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004 +#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 +#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 +#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 +#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 +#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 +#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 +#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 +#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 +#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; +} + +#define REG_A3XX_UNKNOWN_2105 0x00002105 + +#define REG_A3XX_UNKNOWN_2106 0x00002106 + +#define REG_A3XX_UNKNOWN_2107 0x00002107 + +#define REG_A3XX_RB_STENCILREFMASK 0x00002108 +#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff +#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK; +} +#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 +#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) +{ + return ((val) << 
A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK; +} +#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 +#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; +} + +#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109 +#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff +#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; +} +#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 +#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; +} +#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 +#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; +} + +#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e +#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff +#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 +static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val) +{ + return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK; +} +#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000 +#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 +static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val) +{ + return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK; +} + +#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 + +#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea + +#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec +#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f +#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0 +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700 +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 + +#define REG_A3XX_PC_RESTART_INDEX 0x000021ed + +#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200 +#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010 +#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 +static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << 
A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; +} +#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040 +#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200 +#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400 +#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000 +#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000 +#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000 +#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000 +#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000 +#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000 + +#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201 +#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040 +#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6 +static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK; +} +#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 +#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 + +#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202 +#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 +#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26 +static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK; +} + +#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203 + +#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204 +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0 +static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val) +{ + return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK; +} +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000 +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 +static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) +{ + return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK; +} +#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 +#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24 +static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val) +{ + return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK; +} + +#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205 +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0 +static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val) +{ + return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK; +} +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000 +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 +static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) +{ + return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK; +} +#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 +#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24 +static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val) +{ + return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & 
A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK; +} + +#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206 +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 +static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK; +} +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000 +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 +static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK; +} + +#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207 +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 +static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK; +} +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000 +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 +static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK; +} + +#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a + +#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b + +#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c + +#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211 + +#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212 + +#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214 + +#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 + +#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 + +#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a + +#define REG_A3XX_VFD_CONTROL_0 0x00002240 +#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff +#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0 +static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK; +} +#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000 +#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18 +static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK; +} +#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000 +#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22 +static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK; +} +#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000 +#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27 +static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK; +} + +#define REG_A3XX_VFD_CONTROL_1 0x00002241 +#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff +#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0 +static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & 
A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK; +} +#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000 +#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16 +static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK; +} +#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000 +#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24 +static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK; +} + +#define REG_A3XX_VFD_INDEX_MIN 0x00002242 + +#define REG_A3XX_VFD_INDEX_MAX 0x00002243 + +#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244 + +#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245 + +static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; } + +static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; } +#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f +#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0 +static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val) +{ + return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK; +} +#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80 +#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7 +static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) +{ + return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; +} +#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000 +#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000 +#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18 +static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val) +{ + return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK; +} +#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000 +#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24 +static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val) +{ + return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK; +} + +static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; } + +static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; } + +static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; } +#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f +#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0 +static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val) +{ + return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK; +} +#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010 +#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0 +#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6 +static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val) +{ + return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK; +} +#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000 +#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12 +static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val) +{ + return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK; +} +#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000 +#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24 +static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) +{ + return ((val) << 
A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK; +} +#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000 +#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000 + +#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e +#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f +#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0 +static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val) +{ + return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK; +} +#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00 +#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8 +static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val) +{ + return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK; +} + +#define REG_A3XX_VPC_ATTR 0x00002280 +#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff +#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0 +static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val) +{ + return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK; +} +#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000 +#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12 +static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val) +{ + return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK; +} +#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000 +#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28 +static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val) +{ + return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK; +} + +#define REG_A3XX_VPC_PACK 0x00002281 +#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00 +#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8 +static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val) +{ + return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK; +} +#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000 +#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16 +static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val) +{ + return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK; +} + +static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; } + +static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; } + +static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; } + +static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; } + +#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a + +#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b + +#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 +#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 +#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000 +#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 +static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) +{ + return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; +} +#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 +#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 +static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) +{ + return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK; +} +#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000 +#define 
A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22 +static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val) +{ + return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK; +} + +#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4 +#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 +#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002 +#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004 +#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 +#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 +#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000 +#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; +} +#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 +#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 +#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000 +#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24 +static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK; +} + +#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5 +#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff +#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0 +static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK; +} +#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00 +#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10 +static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK; +} +#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000 +#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24 +static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) +{ + return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK; +} + +#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6 +#define 
A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK; +} +#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00 +#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8 +static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK; +} +#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000 +#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20 +static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK; +} + +static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; } + +static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; } +#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff +#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK; +} +#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK; +} +#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK; +} +#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000 +#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25 +static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; } + +static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; } +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; +} + +#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4 +#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 +#define 
A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 +static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; +} +#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 +#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 +static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 + +#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6 + +#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 + +#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8 + +#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df +#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff +#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0 +static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK; +} + +#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0 +#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001 +#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002 +#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004 +#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 +#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 +#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000 +#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; +} +#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000 +#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000 +#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000 +#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24 +static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK; +} + +#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1 +#define 
A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff +#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0 +static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK; +} +#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00 +#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10 +static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK; +} +#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000 +#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20 +static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK; +} +#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000 +#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24 +static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK; +} + +#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2 +#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 +#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 +static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; +} +#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 +#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 +static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 + +#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4 + +#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 + +#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6 + +#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8 + +#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 + +#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec + +static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; } + +static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; } +#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff +#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0 +static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val) +{ + return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK; +} +#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100 + +static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; } + +static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; } +#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f +#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0 +static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val) +{ + return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK; +} + +#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff +#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff +#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0 +static inline uint32_t 
A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val) +{ + return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK; +} + +#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340 +#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff +#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0 +static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK; +} +#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00 +#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8 +static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK; +} +#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000 +#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16 +static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK; +} + +#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341 + +#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342 +#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff +#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0 +static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK; +} +#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00 +#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8 +static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK; +} +#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000 +#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16 +static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val) +{ + return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK; +} + +#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343 + +#define REG_A3XX_VBIF_CLKON 0x00003001 + +#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c + +#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d + +#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e + +#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c + +#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d + +#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a + +#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c + +#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d + +#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030 + +#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031 + +#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034 + +#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035 + +#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036 + +#define REG_A3XX_VBIF_ARB_CTL 0x0000303c + +#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049 + +#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058 + +#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e + +#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f + +#define REG_A3XX_VSC_BIN_SIZE 0x00000c01 +#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f +#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 +static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val) +{ + return ((val >> 5) << 
A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK; +} +#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 +#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5 +static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val) +{ + return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK; +} + +#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02 + +static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; } + +static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; } +#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff +#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0 +static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val) +{ + return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK; +} +#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00 +#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10 +static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val) +{ + return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK; +} +#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000 +#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20 +static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val) +{ + return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK; +} +#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000 +#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24 +static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val) +{ + return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK; +} + +static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; } + +static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } + +#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d + +#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 + +#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49 + +#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a + +#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b + +#define REG_A3XX_UNKNOWN_0C81 0x00000c81 + +#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 + +#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89 + +#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a + +#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b + +static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; } + +static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; } + +static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; } + +static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; } + +static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; } + +#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 + +#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 + +#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 + +#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0 +#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff +#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0 +static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val) +{ + return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK; +} +#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000 +#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14 +static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK; +} + +#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 + +#define 
REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01 + +#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02 + +#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03 + +#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04 + +#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05 + +#define REG_A3XX_UNKNOWN_0E43 0x00000e43 + +#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44 + +#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45 + +#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61 + +#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62 + +#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64 + +#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65 + +#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82 + +#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84 + +#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85 + +#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86 + +#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87 + +#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88 + +#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89 + +#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0 +#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff +#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0 +static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val) +{ + return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK; +} + +#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1 +#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff +#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0 +static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val) +{ + return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK; +} +#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000 +#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28 +static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val) +{ + return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK; +} +#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000 + +#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4 + +#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5 + +#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6 + +#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7 + +#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8 + +#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9 + +#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca + +#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb + +#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0 + +#define REG_A3XX_UNKNOWN_0F03 0x00000f03 + +#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04 + +#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05 + +#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06 + +#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07 + +#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08 + +#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 + +#define REG_A3XX_TEX_SAMP_0 0x00000000 +#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c +#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 +static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val) +{ + return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK; +} +#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030 +#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4 +static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val) +{ + return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK; 
+} +#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0 +#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK; +} +#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00 +#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK; +} +#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000 +#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; +} +#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 + +#define REG_A3XX_TEX_SAMP_1 0x00000001 + +#define REG_A3XX_TEX_CONST_0 0x00000000 +#define A3XX_TEX_CONST_0_TILED 0x00000001 +#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 +#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 +#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 +#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 +#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK; +} +#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 +#define A3XX_TEX_CONST_0_FMT__SHIFT 22 +static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) +{ + return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK; +} +#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000 +#define A3XX_TEX_CONST_0_TYPE__SHIFT 30 +static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val) +{ + return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK; +} + +#define REG_A3XX_TEX_CONST_1 0x00000001 +#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff +#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK; +} +#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000 +#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14 +static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK; +} +#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000 +#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28 +static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val) +{ + return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK; +} + +#define REG_A3XX_TEX_CONST_2 0x00000002 +#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff +#define A3XX_TEX_CONST_2_INDX__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK; +} +#define A3XX_TEX_CONST_2_PITCH__MASK 
0x3ffff000 +#define A3XX_TEX_CONST_2_PITCH__SHIFT 12 +static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK; +} +#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000 +#define A3XX_TEX_CONST_2_SWAP__SHIFT 30 +static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK; +} + +#define REG_A3XX_TEX_CONST_3 0x00000003 + + +#endif /* A3XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h new file mode 100644 index 000000000000..61979d458ac0 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -0,0 +1,432 @@ +#ifndef ADRENO_COMMON_XML +#define ADRENO_COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) +- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum adreno_pa_su_sc_draw { + PC_DRAW_POINTS = 0, + PC_DRAW_LINES = 1, + PC_DRAW_TRIANGLES = 2, +}; + +enum adreno_compare_func { + FUNC_NEVER = 0, + FUNC_LESS = 1, + FUNC_EQUAL = 2, + FUNC_LEQUAL = 3, + FUNC_GREATER = 4, + FUNC_NOTEQUAL = 5, + FUNC_GEQUAL = 6, + FUNC_ALWAYS = 7, +}; + +enum adreno_stencil_op { + STENCIL_KEEP = 0, + STENCIL_ZERO = 1, + STENCIL_REPLACE = 2, + STENCIL_INCR_CLAMP = 3, + STENCIL_DECR_CLAMP = 4, + STENCIL_INVERT = 5, + STENCIL_INCR_WRAP = 6, + STENCIL_DECR_WRAP = 7, +}; + +enum adreno_rb_blend_factor { + FACTOR_ZERO = 0, + FACTOR_ONE = 1, + FACTOR_SRC_COLOR = 4, + FACTOR_ONE_MINUS_SRC_COLOR = 5, + FACTOR_SRC_ALPHA = 6, + FACTOR_ONE_MINUS_SRC_ALPHA = 7, + FACTOR_DST_COLOR = 8, + FACTOR_ONE_MINUS_DST_COLOR = 9, + FACTOR_DST_ALPHA = 10, + FACTOR_ONE_MINUS_DST_ALPHA = 11, + FACTOR_CONSTANT_COLOR = 12, + FACTOR_ONE_MINUS_CONSTANT_COLOR = 13, + FACTOR_CONSTANT_ALPHA = 14, + FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15, + FACTOR_SRC_ALPHA_SATURATE = 16, +}; + +enum adreno_rb_blend_opcode { + BLEND_DST_PLUS_SRC = 0, + BLEND_SRC_MINUS_DST = 1, + BLEND_MIN_DST_SRC = 2, + BLEND_MAX_DST_SRC = 3, + BLEND_DST_MINUS_SRC = 4, + BLEND_DST_PLUS_SRC_BIAS = 5, +}; + +enum adreno_rb_surface_endian { + ENDIAN_NONE = 0, + ENDIAN_8IN16 = 1, + ENDIAN_8IN32 = 2, + ENDIAN_16IN32 = 3, + ENDIAN_8IN64 = 4, + ENDIAN_8IN128 = 5, +}; + +enum adreno_rb_dither_mode { + DITHER_DISABLE = 0, + DITHER_ALWAYS = 1, + DITHER_IF_ALPHA_OFF = 2, +}; + +enum adreno_rb_depth_format { + DEPTHX_16 = 0, + DEPTHX_24_8 = 1, +}; + +enum adreno_mmu_clnt_beh { + BEH_NEVR = 0, + BEH_TRAN_RNG = 1, + BEH_TRAN_FLT = 2, +}; + +#define REG_AXXX_MH_MMU_CONFIG 0x00000040 +#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 +#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 +#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 +#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 +static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 +#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 +#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 +#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 +#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 +#define 
AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 +#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 +static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 +#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 +static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 +#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 +static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 +#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 +static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; +} +#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 +#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 +static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; +} + +#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041 + +#define REG_AXXX_MH_MMU_PT_BASE 0x00000042 + +#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043 + +#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044 + +#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045 + +#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046 + +#define REG_AXXX_MH_MMU_MPU_END 0x00000047 + +#define REG_AXXX_CP_RB_BASE 0x000001c0 + +#define REG_AXXX_CP_RB_CNTL 0x000001c1 +#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f +#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0 +static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val) +{ + return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK; +} +#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00 +#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8 +static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val) +{ + return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK; +} +#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000 +#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16 +static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val) +{ + return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK; +} +#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000 +#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000 +#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000 + +#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3 +#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003 +#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0 +static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val) +{ + return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK; +} +#define 
AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc +#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2 +static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val) +{ + return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK; +} + +#define REG_AXXX_CP_RB_RPTR 0x000001c4 + +#define REG_AXXX_CP_RB_WPTR 0x000001c5 + +#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6 + +#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7 + +#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8 + +#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK; +} +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK; +} +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK; +} + +#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 + +#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 +#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f +#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK; +} +#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00 +#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8 +static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK; +} +#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000 +#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK; +} + +#define REG_AXXX_CP_STQ_AVAIL 0x000001d8 +#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f +#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0 +static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val) +{ + return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK; +} + +#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9 +#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f +#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0 +static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val) +{ + return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK; +} + +#define REG_AXXX_SCRATCH_UMSK 0x000001dc +#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff +#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0 +static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val) +{ + return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK; +} +#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000 +#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16 +static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val) +{ + return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK; +} + +#define REG_AXXX_SCRATCH_ADDR 0x000001dd + +#define REG_AXXX_CP_ME_RDADDR 0x000001ea + +#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec + +#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed + +#define 
REG_AXXX_CP_INT_CNTL 0x000001f2 + +#define REG_AXXX_CP_INT_STATUS 0x000001f3 + +#define REG_AXXX_CP_INT_ACK 0x000001f4 + +#define REG_AXXX_CP_ME_CNTL 0x000001f6 + +#define REG_AXXX_CP_ME_STATUS 0x000001f7 + +#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8 + +#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9 + +#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa + +#define REG_AXXX_CP_DEBUG 0x000001fc +#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000 +#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000 +#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000 +#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000 +#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000 +#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000 +#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000 +#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000 + +#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd +#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe +#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff +#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_SCRATCH_REG0 0x00000578 + +#define REG_AXXX_CP_SCRATCH_REG1 0x00000579 + +#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a + +#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b + +#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c + +#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d + +#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e + +#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f + +#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a + +#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b + +#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c + +#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d + +#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e + + +#endif /* ADRENO_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h new file mode 100644 index 000000000000..94c13f418e75 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -0,0 +1,254 @@ +#ifndef ADRENO_PM4_XML +#define ADRENO_PM4_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) +- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) +- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) + +Copyright (C) 2013 by the following authors: +- Rob Clark (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum vgt_event_type { + VS_DEALLOC = 0, + PS_DEALLOC = 1, + VS_DONE_TS = 2, + PS_DONE_TS = 3, + CACHE_FLUSH_TS = 4, + CONTEXT_DONE = 5, + CACHE_FLUSH = 6, + HLSQ_FLUSH = 7, + VIZQUERY_START = 7, + VIZQUERY_END = 8, + SC_WAIT_WC = 9, + RST_PIX_CNT = 13, + RST_VTX_CNT = 14, + TILE_FLUSH = 15, + CACHE_FLUSH_AND_INV_TS_EVENT = 20, + ZPASS_DONE = 21, + CACHE_FLUSH_AND_INV_EVENT = 22, + PERFCOUNTER_START = 23, + PERFCOUNTER_STOP = 24, + VS_FETCH_DONE = 27, + FACENESS_FLUSH = 28, +}; + +enum pc_di_primtype { + DI_PT_NONE = 0, + DI_PT_POINTLIST = 1, + DI_PT_LINELIST = 2, + DI_PT_LINESTRIP = 3, + DI_PT_TRILIST = 4, + DI_PT_TRIFAN = 5, + DI_PT_TRISTRIP = 6, + DI_PT_RECTLIST = 8, + DI_PT_QUADLIST = 13, + DI_PT_QUADSTRIP = 14, + DI_PT_POLYGON = 15, + DI_PT_2D_COPY_RECT_LIST_V0 = 16, + DI_PT_2D_COPY_RECT_LIST_V1 = 17, + DI_PT_2D_COPY_RECT_LIST_V2 = 18, + DI_PT_2D_COPY_RECT_LIST_V3 = 19, + DI_PT_2D_FILL_RECT_LIST = 20, + DI_PT_2D_LINE_STRIP = 21, + DI_PT_2D_TRI_STRIP = 22, +}; + +enum pc_di_src_sel { + DI_SRC_SEL_DMA = 0, + DI_SRC_SEL_IMMEDIATE = 1, + DI_SRC_SEL_AUTO_INDEX = 2, + DI_SRC_SEL_RESERVED = 3, +}; + +enum pc_di_index_size { + INDEX_SIZE_IGN = 0, + INDEX_SIZE_16_BIT = 0, + INDEX_SIZE_32_BIT = 1, + INDEX_SIZE_8_BIT = 2, + INDEX_SIZE_INVALID = 0, +}; + +enum pc_di_vis_cull_mode { + IGNORE_VISIBILITY = 0, +}; + +enum adreno_pm4_packet_type { + CP_TYPE0_PKT = 0, + CP_TYPE1_PKT = 0x40000000, + CP_TYPE2_PKT = 0x80000000, + CP_TYPE3_PKT = 0xc0000000, +}; + +enum adreno_pm4_type3_packets { + CP_ME_INIT = 72, + CP_NOP = 16, + CP_INDIRECT_BUFFER = 63, + CP_INDIRECT_BUFFER_PFD = 55, + CP_WAIT_FOR_IDLE = 38, + CP_WAIT_REG_MEM = 60, + CP_WAIT_REG_EQ = 82, + CP_WAT_REG_GTE = 83, + CP_WAIT_UNTIL_READ = 92, + CP_WAIT_IB_PFD_COMPLETE = 93, + CP_REG_RMW = 33, + CP_SET_BIN_DATA = 47, + CP_REG_TO_MEM = 62, + CP_MEM_WRITE = 61, + CP_MEM_WRITE_CNTR = 79, + CP_COND_EXEC = 68, + CP_COND_WRITE = 69, + CP_EVENT_WRITE = 70, + CP_EVENT_WRITE_SHD = 88, + CP_EVENT_WRITE_CFL = 89, + CP_EVENT_WRITE_ZPD = 91, + CP_RUN_OPENCL = 49, + CP_DRAW_INDX = 34, + CP_DRAW_INDX_2 = 54, + CP_DRAW_INDX_BIN = 52, + CP_DRAW_INDX_2_BIN = 53, + CP_VIZ_QUERY = 35, + CP_SET_STATE = 37, + CP_SET_CONSTANT = 45, + CP_IM_LOAD = 39, + CP_IM_LOAD_IMMEDIATE = 43, + CP_LOAD_CONSTANT_CONTEXT = 46, + CP_INVALIDATE_STATE = 59, + CP_SET_SHADER_BASES = 74, + CP_SET_BIN_MASK = 80, + CP_SET_BIN_SELECT = 81, + CP_CONTEXT_UPDATE = 94, + CP_INTERRUPT = 64, + CP_IM_STORE = 44, + CP_SET_BIN_BASE_OFFSET = 75, + CP_SET_DRAW_INIT_FLAGS = 75, + CP_SET_PROTECTED_MODE = 95, + CP_LOAD_STATE = 48, + CP_COND_INDIRECT_BUFFER_PFE = 58, + CP_COND_INDIRECT_BUFFER_PFD = 50, + CP_INDIRECT_BUFFER_PFE = 63, + CP_SET_BIN = 76, +}; + +enum adreno_state_block { + SB_VERT_TEX = 0, + SB_VERT_MIPADDR = 1, + SB_FRAG_TEX = 2, + SB_FRAG_MIPADDR = 3, + SB_VERT_SHADER = 4, + SB_FRAG_SHADER = 6, +}; + +enum adreno_state_type { + ST_SHADER = 0, + ST_CONSTANTS = 1, +}; + +enum adreno_state_src { + SS_DIRECT = 0, + SS_INDIRECT = 4, +}; + +#define REG_CP_LOAD_STATE_0 0x00000000 +#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff +#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 +static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val) +{ + return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK; +} +#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000 +#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16 +static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val) +{ + return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK; +} +#define 
CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000 +#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19 +static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val) +{ + return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK; +} +#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000 +#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22 +static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val) +{ + return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK; +} + +#define REG_CP_LOAD_STATE_1 0x00000001 +#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003 +#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0 +static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val) +{ + return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK; +} +#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc +#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2 +static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val) +{ + return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; +} + +#define REG_CP_SET_BIN_0 0x00000000 + +#define REG_CP_SET_BIN_1 0x00000001 +#define CP_SET_BIN_1_X1__MASK 0x0000ffff +#define CP_SET_BIN_1_X1__SHIFT 0 +static inline uint32_t CP_SET_BIN_1_X1(uint32_t val) +{ + return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK; +} +#define CP_SET_BIN_1_Y1__MASK 0xffff0000 +#define CP_SET_BIN_1_Y1__SHIFT 16 +static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val) +{ + return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK; +} + +#define REG_CP_SET_BIN_2 0x00000002 +#define CP_SET_BIN_2_X2__MASK 0x0000ffff +#define CP_SET_BIN_2_X2__SHIFT 0 +static inline uint32_t CP_SET_BIN_2_X2(uint32_t val) +{ + return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK; +} +#define CP_SET_BIN_2_Y2__MASK 0xffff0000 +#define CP_SET_BIN_2_Y2__SHIFT 16 +static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val) +{ + return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK; +} + + +#endif /* ADRENO_PM4_XML */ -- cgit v1.2.3 From 7198e6b03155f6dadecadba004eb83b81a6ffe4c Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 19 Jul 2013 12:59:32 -0400 Subject: drm/msm: add a3xx gpu support Add initial support for a3xx 3d core. So far, with hardware that I've seen to date, we can have: + zero, one, or two z180 2d cores + a3xx or a2xx 3d core, which share a common CP (the firmware for the CP seems to implement some different PM4 packet types but the basics of cmdstream submission are the same) Which means that the eventual complete "class" hierarchy, once support for all past and present hw is in place, becomes: + msm_gpu + adreno_gpu + a3xx_gpu + a2xx_gpu + z180_gpu This commit splits out the parts that will eventually be common between a2xx/a3xx into adreno_gpu, and the parts that are even common to z180 into msm_gpu. Note that there is no cmdstream validation required. All memory access from the GPU is via IOMMU/MMU. So as long as you don't map silly things to the GPU, there isn't much damage that the GPU can do. 
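
[Editor's note, not part of the patch: the "class" hierarchy above is plain struct embedding plus container_of() downcasts, as in the to_adreno_gpu()/to_a3xx_gpu() macros added below in adreno_gpu.h and a3xx_gpu.h. The following is a minimal standalone sketch of that pattern with the struct fields trimmed to the essentials; field names other than the embedded "base" members are illustrative only.]

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msm_gpu    { const char *name; };                     /* common to 2d + 3d cores */
struct adreno_gpu { struct msm_gpu base; unsigned revn; };   /* common to a2xx/a3xx */
struct a3xx_gpu   { struct adreno_gpu base; };               /* a3xx specifics */

#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
#define to_a3xx_gpu(x)   container_of(x, struct a3xx_gpu, base)

int main(void)
{
	struct a3xx_gpu a3xx = {
		.base = { .base = { .name = "a320" }, .revn = 320 },
	};

	/* Core code only ever sees the innermost base pointer... */
	struct msm_gpu *gpu = &a3xx.base.base;

	/* ...and the per-layer callbacks downcast as needed. */
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

	printf("%s revn=%u, recovered %p (original %p)\n",
	       gpu->name, adreno_gpu->revn,
	       (void *)a3xx_gpu, (void *)&a3xx);
	return 0;
}

[This is why the a3xx callbacks below (a3xx_hw_init, a3xx_destroy, ...) take a struct msm_gpu * and immediately convert it with to_adreno_gpu()/to_a3xx_gpu(); an eventual a2xx_gpu or z180_gpu backend would slot in the same way.]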
Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Makefile | 7 +- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 501 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 30 ++ drivers/gpu/drm/msm/adreno/adreno_gpu.c | 350 ++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/adreno_gpu.h | 142 +++++++++ drivers/gpu/drm/msm/msm_drv.c | 246 +++++++++++++++- drivers/gpu/drm/msm/msm_drv.h | 44 ++- drivers/gpu/drm/msm/msm_gem.c | 84 +++++- drivers/gpu/drm/msm/msm_gem.h | 58 ++++ drivers/gpu/drm/msm/msm_gem_submit.c | 412 ++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gpu.c | 411 ++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gpu.h | 114 ++++++++ drivers/gpu/drm/msm/msm_ringbuffer.c | 61 ++++ drivers/gpu/drm/msm/msm_ringbuffer.h | 43 +++ include/uapi/drm/Kbuild | 1 + include/uapi/drm/msm_drm.h | 207 +++++++++++++ 16 files changed, 2695 insertions(+), 16 deletions(-) create mode 100644 drivers/gpu/drm/msm/adreno/a3xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a3xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/adreno_gpu.h create mode 100644 drivers/gpu/drm/msm/msm_gem_submit.c create mode 100644 drivers/gpu/drm/msm/msm_gpu.c create mode 100644 drivers/gpu/drm/msm/msm_gpu.h create mode 100644 drivers/gpu/drm/msm/msm_ringbuffer.c create mode 100644 drivers/gpu/drm/msm/msm_ringbuffer.h create mode 100644 include/uapi/drm/msm_drm.h diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 4068122a9377..439dfb5b417b 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -4,6 +4,8 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) endif msm-y := \ + adreno/adreno_gpu.o \ + adreno/a3xx_gpu.o \ hdmi/hdmi.o \ hdmi/hdmi_connector.o \ hdmi/hdmi_i2c.o \ @@ -18,7 +20,10 @@ msm-y := \ msm_connector.o \ msm_drv.o \ msm_fb.o \ - msm_gem.o + msm_gem.o \ + msm_gem_submit.o \ + msm_gpu.o \ + msm_ringbuffer.o msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c new file mode 100644 index 000000000000..13d61bbed302 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -0,0 +1,501 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "a3xx_gpu.h" + +#define A3XX_INT0_MASK \ + (A3XX_INT0_RBBM_AHB_ERROR | \ + A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \ + A3XX_INT0_CP_T0_PACKET_IN_IB | \ + A3XX_INT0_CP_OPCODE_ERROR | \ + A3XX_INT0_CP_RESERVED_BIT_ERROR | \ + A3XX_INT0_CP_HW_FAULT | \ + A3XX_INT0_CP_IB1_INT | \ + A3XX_INT0_CP_IB2_INT | \ + A3XX_INT0_CP_RB_INT | \ + A3XX_INT0_CP_REG_PROTECT_FAULT | \ + A3XX_INT0_CP_AHB_ERROR_HALT | \ + A3XX_INT0_UCHE_OOB_ACCESS) + +static struct platform_device *a3xx_pdev; + +static void a3xx_me_init(struct msm_gpu *gpu) +{ + struct msm_ringbuffer *ring = gpu->rb; + + OUT_PKT3(ring, CP_ME_INIT, 17); + OUT_RING(ring, 0x000003f7); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000080); + OUT_RING(ring, 0x00000100); + OUT_RING(ring, 0x00000180); + OUT_RING(ring, 0x00006600); + OUT_RING(ring, 0x00000150); + OUT_RING(ring, 0x0000014e); + OUT_RING(ring, 0x00000154); + OUT_RING(ring, 0x00000001); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + gpu->funcs->flush(gpu); + gpu->funcs->idle(gpu); +} + +static int a3xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + uint32_t *ptr, len; + int i, ret; + + DBG("%s", gpu->name); + + if (adreno_is_a305(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); + + } else if (adreno_is_a320(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); + /* Enable 1K sort: */ + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); + + } else if (adreno_is_a330(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818); + 
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff); + /* Enable 1K sort: */ + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff); + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); + /* Disable VBIF clock gating. This is to enable AXI running + * higher frequency than GPU: + */ + gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001); + + } else { + BUG(); + } + + /* Make all blocks contribute to the GPU BUSY perf counter: */ + gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff); + + /* Tune the hystersis counters for SP and CP idle detection: */ + gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10); + gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10); + + /* Enable the RBBM error reporting bits. This lets us get + * useful information on failure: + */ + gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001); + + /* Enable AHB error reporting: */ + gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff); + + /* Turn on the power counters: */ + gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000); + + /* Turn on hang detection - this spews a lot of useful information + * into the RBBM registers on a hang: + */ + gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff); + + /* Enable 64-byte cacheline size. 
HW Default is 32-byte (0x000000E0): */ + gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); + + /* Enable Clock gating: */ + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); + + /* Set the OCMEM base address for A330 */ +//TODO: +// if (adreno_is_a330(adreno_gpu)) { +// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, +// (unsigned int)(a3xx_gpu->ocmem_base >> 14)); +// } + + /* Turn on performance counters: */ + gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); + + /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS + * we will use this to augment our hang detection: + */ + gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT, + SP_FS_FULL_ALU_INSTRUCTIONS); + + gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK); + + ret = adreno_hw_init(gpu); + if (ret) + return ret; + + /* setup access protection: */ + gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); + + /* RBBM registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040); + gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080); + gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc); + gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108); + gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140); + gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400); + + /* CP registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700); + gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8); + gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0); + gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178); + gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180); + + /* RB registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300); + + /* VBIF registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000); + + /* NOTE: PM4/micro-engine firmware registers look to be the same + * for a2xx and a3xx.. we could possibly push that part down to + * adreno_gpu base class. Or push both PM4 and PFP but + * parameterize the pfp ucode addr/data registers.. 
+ */ + + /* Load PM4: */ + ptr = (uint32_t *)(adreno_gpu->pm4->data); + len = adreno_gpu->pm4->size / 4; + DBG("loading PM4 ucode version: %u", ptr[0]); + + gpu_write(gpu, REG_AXXX_CP_DEBUG, + AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | + AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE); + gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]); + + /* Load PFP: */ + ptr = (uint32_t *)(adreno_gpu->pfp->data); + len = adreno_gpu->pfp->size / 4; + DBG("loading PFP ucode version: %u", ptr[0]); + + gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); + + /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ + if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) + gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, + AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | + AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | + AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); + + + /* clear ME_HALT to start micro engine */ + gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); + + a3xx_me_init(gpu); + + return 0; +} + +static void a3xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); + + DBG("%s", gpu->name); + + adreno_gpu_cleanup(adreno_gpu); + put_device(&a3xx_gpu->pdev->dev); + kfree(a3xx_gpu); +} + +static void a3xx_idle(struct msm_gpu *gpu) +{ + unsigned long t; + + /* wait for ringbuffer to drain: */ + adreno_idle(gpu); + + t = jiffies + ADRENO_IDLE_TIMEOUT; + + /* then wait for GPU to finish: */ + do { + uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS); + if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY)) + return; + } while(time_before(jiffies, t)); + + DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name); + + /* TODO maybe we need to reset GPU here to recover from hang? 
*/ +} + +static irqreturn_t a3xx_irq(struct msm_gpu *gpu) +{ + uint32_t status; + + status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS); + DBG("%s: %08x", gpu->name, status); + + // TODO + + gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status); + + msm_gpu_retire(gpu); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_DEBUG_FS +static const unsigned int a3xx_registers[] = { + 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027, + 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c, + 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5, + 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1, + 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd, + 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff, + 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f, + 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f, + 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e, + 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f, + 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7, + 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05, + 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65, + 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7, + 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09, + 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069, + 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075, + 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109, + 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115, + 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0, + 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e, + 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8, + 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7, + 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356, + 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d, + 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472, + 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef, + 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511, + 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed, + 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a, + 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce, + 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec, + 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749, + 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d, + 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036, + 0x303c, 0x303c, 0x305e, 0x305f, +}; + +static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) +{ + int i; + + adreno_show(gpu, m); + seq_printf(m, "status: %08x\n", + gpu_read(gpu, REG_A3XX_RBBM_STATUS)); + + /* dump these out in a form that can be parsed by demsm: */ + seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); + for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) { + uint32_t start = a3xx_registers[i]; + uint32_t end = a3xx_registers[i+1]; + uint32_t addr; + + for (addr = start; addr <= end; addr++) { + uint32_t val = gpu_read(gpu, addr); + seq_printf(m, "IO:R %08x %08x\n", addr<<2, val); + } + } +} +#endif + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .hw_init = a3xx_hw_init, + .pm_suspend = msm_gpu_pm_suspend, + .pm_resume = msm_gpu_pm_resume, + .last_fence = adreno_last_fence, + 
.submit = adreno_submit, + .flush = adreno_flush, + .idle = a3xx_idle, + .irq = a3xx_irq, + .destroy = a3xx_destroy, +#ifdef CONFIG_DEBUG_FS + .show = a3xx_show, +#endif + }, +}; + +struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) +{ + struct a3xx_gpu *a3xx_gpu = NULL; + struct msm_gpu *gpu; + struct platform_device *pdev = a3xx_pdev; + struct adreno_platform_config *config; + int ret; + + if (!pdev) { + dev_err(dev->dev, "no a3xx device\n"); + ret = -ENXIO; + goto fail; + } + + config = pdev->dev.platform_data; + + a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL); + if (!a3xx_gpu) { + ret = -ENOMEM; + goto fail; + } + + gpu = &a3xx_gpu->base.base; + + get_device(&pdev->dev); + a3xx_gpu->pdev = pdev; + + gpu->fast_rate = config->fast_rate; + gpu->slow_rate = config->slow_rate; + gpu->bus_freq = config->bus_freq; + + DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", + gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); + + ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base, + &funcs, config->rev); + if (ret) + goto fail; + + return &a3xx_gpu->base.base; + +fail: + if (a3xx_gpu) + a3xx_destroy(&a3xx_gpu->base.base); + + return ERR_PTR(ret); +} + +/* + * The a3xx device: + */ + +static int a3xx_probe(struct platform_device *pdev) +{ + static struct adreno_platform_config config = {}; +#ifdef CONFIG_OF + /* TODO */ +#else + uint32_t version = socinfo_get_version(); + if (cpu_is_apq8064ab()) { + config.fast_rate = 450000000; + config.slow_rate = 27000000; + config.bus_freq = 4; + config.rev = ADRENO_REV(3, 2, 1, 0); + } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) { + config.fast_rate = 400000000; + config.slow_rate = 27000000; + config.bus_freq = 4; + + if (SOCINFO_VERSION_MAJOR(version) == 2) + config.rev = ADRENO_REV(3, 2, 0, 2); + else if ((SOCINFO_VERSION_MAJOR(version) == 1) && + (SOCINFO_VERSION_MINOR(version) == 1)) + config.rev = ADRENO_REV(3, 2, 0, 1); + else + config.rev = ADRENO_REV(3, 2, 0, 0); + + } else if (cpu_is_msm8930()) { + config.fast_rate = 400000000; + config.slow_rate = 27000000; + config.bus_freq = 3; + + if ((SOCINFO_VERSION_MAJOR(version) == 1) && + (SOCINFO_VERSION_MINOR(version) == 2)) + config.rev = ADRENO_REV(3, 0, 5, 2); + else + config.rev = ADRENO_REV(3, 0, 5, 0); + + } +#endif + pdev->dev.platform_data = &config; + a3xx_pdev = pdev; + return 0; +} + +static int a3xx_remove(struct platform_device *pdev) +{ + a3xx_pdev = NULL; + return 0; +} + +static struct platform_driver a3xx_driver = { + .probe = a3xx_probe, + .remove = a3xx_remove, + .driver.name = "kgsl-3d0", +}; + +void __init a3xx_register(void) +{ + platform_driver_register(&a3xx_driver); +} + +void __exit a3xx_unregister(void) +{ + platform_driver_unregister(&a3xx_driver); +} diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h new file mode 100644 index 000000000000..32c398c2d00a --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __A3XX_GPU_H__ +#define __A3XX_GPU_H__ + +#include "adreno_gpu.h" +#include "a3xx.xml.h" + +struct a3xx_gpu { + struct adreno_gpu base; + struct platform_device *pdev; +}; +#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) + +#endif /* __A3XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c new file mode 100644 index 000000000000..282163ee3fa5 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -0,0 +1,350 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "adreno_gpu.h" +#include "msm_gem.h" + +struct adreno_info { + struct adreno_rev rev; + uint32_t revn; + const char *name; + const char *pm4fw, *pfpfw; + uint32_t gmem; +}; + +#define ANY_ID 0xff + +static const struct adreno_info gpulist[] = { + { + .rev = ADRENO_REV(3, 0, 5, ANY_ID), + .revn = 305, + .name = "A305", + .pm4fw = "a300_pm4.fw", + .pfpfw = "a300_pfp.fw", + .gmem = SZ_256K, + }, { + .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), + .revn = 320, + .name = "A320", + .pm4fw = "a300_pm4.fw", + .pfpfw = "a300_pfp.fw", + .gmem = SZ_512K, + }, { + .rev = ADRENO_REV(3, 3, 0, 0), + .revn = 330, + .name = "A330", + .pm4fw = "a330_pm4.fw", + .pfpfw = "a330_pfp.fw", + .gmem = SZ_1M, + }, +}; + +#define RB_SIZE SZ_32K +#define RB_BLKSIZE 16 + +int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + switch (param) { + case MSM_PARAM_GPU_ID: + *value = adreno_gpu->info->revn; + return 0; + case MSM_PARAM_GMEM_SIZE: + *value = adreno_gpu->info->gmem; + return 0; + default: + DBG("%s: invalid param: %u", gpu->name, param); + return -EINVAL; + } +} + +#define rbmemptr(adreno_gpu, member) \ + ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) + +int adreno_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + DBG("%s", gpu->name); + + /* Setup REG_CP_RB_CNTL: */ + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + /* size is log2(quad-words): */ + AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | + AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE)); + + /* Setup ringbuffer address: */ + gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); + gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr)); + + /* Setup scratch/timestamp: */ + gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence)); + + gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1); + + return 0; +} + +static uint32_t get_wptr(struct msm_ringbuffer *ring) +{ + return ring->cur - ring->start; +} + +uint32_t adreno_last_fence(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + return adreno_gpu->memptrs->fence; +} + +int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = gpu->rb; + 
unsigned i, ibs = 0; + + adreno_gpu->last_fence = submit->fence; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == ctx) + break; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, submit->cmd[i].iova); + OUT_RING(ring, submit->cmd[i].size); + ibs++; + break; + } + } + + /* on a320, at least, we seem to need to pad things out to an + * even number of qwords to avoid issue w/ CP hanging on wrap- + * around: + */ + if (ibs % 2) + OUT_PKT2(ring); + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->fence); + + if (adreno_is_a3xx(adreno_gpu)) { + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, HLSQ_FLUSH); + + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + } + + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS); + OUT_RING(ring, rbmemptr(adreno_gpu, fence)); + OUT_RING(ring, submit->fence); + + /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ + OUT_PKT3(ring, CP_INTERRUPT, 1); + OUT_RING(ring, 0x80000000); + +#if 0 + if (adreno_is_a3xx(adreno_gpu)) { + /* Dummy set-constant to trigger context rollover */ + OUT_PKT3(ring, CP_SET_CONSTANT, 2); + OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); + OUT_RING(ring, 0x00000000); + } +#endif + + gpu->funcs->flush(gpu); + + return 0; +} + +void adreno_flush(struct msm_gpu *gpu) +{ + uint32_t wptr = get_wptr(gpu->rb); + + /* ensure writes to ringbuffer have hit system memory: */ + mb(); + + gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr); +} + +void adreno_idle(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + uint32_t rptr, wptr = get_wptr(gpu->rb); + unsigned long t; + + t = jiffies + ADRENO_IDLE_TIMEOUT; + + /* then wait for CP to drain ringbuffer: */ + do { + rptr = adreno_gpu->memptrs->rptr; + if (rptr == wptr) + return; + } while(time_before(jiffies, t)); + + DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name); + + /* TODO maybe we need to reset GPU here to recover from hang? 
*/ +} + +#ifdef CONFIG_DEBUG_FS +void adreno_show(struct msm_gpu *gpu, struct seq_file *m) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + seq_printf(m, "revision: %d (%d.%d.%d.%d)\n", + adreno_gpu->info->revn, adreno_gpu->rev.core, + adreno_gpu->rev.major, adreno_gpu->rev.minor, + adreno_gpu->rev.patchid); + + seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, + adreno_gpu->last_fence); + seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); + seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); + seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); +} +#endif + +void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + uint32_t freedwords; + do { + uint32_t size = gpu->rb->size / 4; + uint32_t wptr = get_wptr(gpu->rb); + uint32_t rptr = adreno_gpu->memptrs->rptr; + freedwords = (rptr + (size - 1) - wptr) % size; + } while(freedwords < ndwords); +} + +static const char *iommu_ports[] = { + "gfx3d_user", "gfx3d_priv", + "gfx3d1_user", "gfx3d1_priv", +}; + +static inline bool _rev_match(uint8_t entry, uint8_t id) +{ + return (entry == ANY_ID) || (entry == id); +} + +int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, + struct adreno_rev rev) +{ + int i, ret; + + /* identify gpu: */ + for (i = 0; i < ARRAY_SIZE(gpulist); i++) { + const struct adreno_info *info = &gpulist[i]; + if (_rev_match(info->rev.core, rev.core) && + _rev_match(info->rev.major, rev.major) && + _rev_match(info->rev.minor, rev.minor) && + _rev_match(info->rev.patchid, rev.patchid)) { + gpu->info = info; + gpu->revn = info->revn; + break; + } + } + + if (i == ARRAY_SIZE(gpulist)) { + dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n", + rev.core, rev.major, rev.minor, rev.patchid); + return -ENXIO; + } + + DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name, + rev.core, rev.major, rev.minor, rev.patchid); + + gpu->funcs = funcs; + gpu->rev = rev; + + ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); + if (ret) { + dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", + gpu->info->pm4fw, ret); + return ret; + } + + ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev); + if (ret) { + dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", + gpu->info->pfpfw, ret); + return ret; + } + + ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base, + gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", + RB_SIZE); + if (ret) + return ret; + + ret = msm_iommu_attach(drm, gpu->base.iommu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + if (ret) + return ret; + + gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), + MSM_BO_UNCACHED); + if (IS_ERR(gpu->memptrs_bo)) { + ret = PTR_ERR(gpu->memptrs_bo); + gpu->memptrs_bo = NULL; + dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); + return ret; + } + + gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo); + if (!gpu->memptrs) { + dev_err(drm->dev, "could not vmap memptrs\n"); + return -ENOMEM; + } + + ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id, + &gpu->memptrs_iova); + if (ret) { + dev_err(drm->dev, "could not map memptrs: %d\n", ret); + return ret; + } + + return 0; +} + +void adreno_gpu_cleanup(struct adreno_gpu *gpu) +{ + if (gpu->memptrs_bo) { + if (gpu->memptrs_iova) + msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); + drm_gem_object_unreference(gpu->memptrs_bo); + } + if (gpu->pm4) + release_firmware(gpu->pm4); + if (gpu->pfp) + 
release_firmware(gpu->pfp); + msm_gpu_cleanup(&gpu->base); +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h new file mode 100644 index 000000000000..6b49c4f27fec --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ADRENO_GPU_H__ +#define __ADRENO_GPU_H__ + +#include + +#include "msm_gpu.h" + +#include "adreno_common.xml.h" +#include "adreno_pm4.xml.h" + +struct adreno_rev { + uint8_t core; + uint8_t major; + uint8_t minor; + uint8_t patchid; +}; + +#define ADRENO_REV(core, major, minor, patchid) \ + ((struct adreno_rev){ core, major, minor, patchid }) + +struct adreno_gpu_funcs { + struct msm_gpu_funcs base; +}; + +struct adreno_info; + +struct adreno_rbmemptrs { + volatile uint32_t rptr; + volatile uint32_t wptr; + volatile uint32_t fence; +}; + +struct adreno_gpu { + struct msm_gpu base; + struct adreno_rev rev; + const struct adreno_info *info; + uint32_t revn; /* numeric revision name */ + const struct adreno_gpu_funcs *funcs; + + uint32_t last_fence; + + /* firmware: */ + const struct firmware *pm4, *pfp; + + /* ringbuffer rptr/wptr: */ + // TODO should this be in msm_ringbuffer? I think it would be + // different for z180.. + struct adreno_rbmemptrs *memptrs; + struct drm_gem_object *memptrs_bo; + uint32_t memptrs_iova; +}; +#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) + +/* platform config data (ie. 
from DT, or pdata) */ +struct adreno_platform_config { + struct adreno_rev rev; + uint32_t fast_rate, slow_rate, bus_freq; +}; + +#define ADRENO_IDLE_TIMEOUT (20 * 1000) + +static inline bool adreno_is_a3xx(struct adreno_gpu *gpu) +{ + return (gpu->revn >= 300) && (gpu->revn < 400); +} + +static inline bool adreno_is_a305(struct adreno_gpu *gpu) +{ + return gpu->revn == 305; +} + +static inline bool adreno_is_a320(struct adreno_gpu *gpu) +{ + return gpu->revn == 320; +} + +static inline bool adreno_is_a330(struct adreno_gpu *gpu) +{ + return gpu->revn == 330; +} + +int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); +int adreno_hw_init(struct msm_gpu *gpu); +uint32_t adreno_last_fence(struct msm_gpu *gpu); +int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx); +void adreno_flush(struct msm_gpu *gpu); +void adreno_idle(struct msm_gpu *gpu); +#ifdef CONFIG_DEBUG_FS +void adreno_show(struct msm_gpu *gpu, struct seq_file *m); +#endif +void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords); + +int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, + struct adreno_rev rev); +void adreno_gpu_cleanup(struct adreno_gpu *gpu); + + +/* ringbuffer helpers (the parts that are adreno specific) */ + +static inline void +OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) +{ + adreno_wait_ring(ring->gpu, cnt+1); + OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); +} + +/* no-op packet: */ +static inline void +OUT_PKT2(struct msm_ringbuffer *ring) +{ + adreno_wait_ring(ring->gpu, 1); + OUT_RING(ring, CP_TYPE2_PKT); +} + +static inline void +OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) +{ + adreno_wait_ring(ring->gpu, cnt+1); + OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); +} + + +#endif /* __ADRENO_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b5ae0dbe1eb8..864c9773636b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -16,6 +16,7 @@ */ #include "msm_drv.h" +#include "msm_gpu.h" #include @@ -135,6 +136,7 @@ static int msm_unload(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; + struct msm_gpu *gpu = priv->gpu; drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); @@ -152,6 +154,12 @@ static int msm_unload(struct drm_device *dev) kms->funcs->destroy(kms); } + if (gpu) { + mutex_lock(&dev->struct_mutex); + gpu->funcs->pm_suspend(gpu); + gpu->funcs->destroy(gpu); + mutex_unlock(&dev->struct_mutex); + } dev->dev_private = NULL; @@ -176,6 +184,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) dev->dev_private = priv; priv->wq = alloc_ordered_workqueue("msm", 0); + init_waitqueue_head(&priv->fence_event); INIT_LIST_HEAD(&priv->inactive_list); @@ -240,12 +249,70 @@ fail: return ret; } +static void load_gpu(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu; + + if (priv->gpu) + return; + + mutex_lock(&dev->struct_mutex); + gpu = a3xx_gpu_init(dev); + if (IS_ERR(gpu)) { + dev_warn(dev->dev, "failed to load a3xx gpu\n"); + gpu = NULL; + /* not fatal */ + } + mutex_unlock(&dev->struct_mutex); + + if (gpu) { + int ret; + gpu->funcs->pm_resume(gpu); + ret = gpu->funcs->hw_init(gpu); + if (ret) { + dev_err(dev->dev, "gpu hw init failed: %d\n", ret); + 
gpu->funcs->destroy(gpu); + gpu = NULL; + } + } + + priv->gpu = gpu; +} + +static int msm_open(struct drm_device *dev, struct drm_file *file) +{ + struct msm_file_private *ctx; + + /* For now, load gpu on open.. to avoid the requirement of having + * firmware in the initrd. + */ + load_gpu(dev); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + file->driver_priv = ctx; + + return 0; +} + static void msm_preclose(struct drm_device *dev, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; + struct msm_file_private *ctx = file->driver_priv; struct msm_kms *kms = priv->kms; + if (kms) kms->funcs->preclose(kms, file); + + mutex_lock(&dev->struct_mutex); + if (ctx == priv->lastctx) + priv->lastctx = NULL; + mutex_unlock(&dev->struct_mutex); + + kfree(ctx); } static void msm_lastclose(struct drm_device *dev) @@ -316,11 +383,30 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id) */ #ifdef CONFIG_DEBUG_FS +static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + + if (gpu) { + seq_printf(m, "%s Status:\n", gpu->name); + gpu->funcs->show(gpu, m); + } + + return 0; +} + static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + + if (gpu) { + seq_printf(m, "Active Objects (%s):\n", gpu->name); + msm_gem_describe_objects(&gpu->active_list, m); + } - seq_printf(m, "All Objects:\n"); + seq_printf(m, "Inactive Objects:\n"); msm_gem_describe_objects(&priv->inactive_list, m); return 0; @@ -375,6 +461,7 @@ static int show_locked(struct seq_file *m, void *arg) } static struct drm_info_list msm_debugfs_list[] = { + {"gpu", show_locked, 0, msm_gpu_show}, {"gem", show_locked, 0, msm_gem_show}, { "mm", show_locked, 0, msm_mm_show }, { "fb", show_locked, 0, msm_fb_show }, @@ -404,6 +491,158 @@ static void msm_debugfs_cleanup(struct drm_minor *minor) } #endif +/* + * Fences: + */ + +int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, + struct timespec *timeout) +{ + struct msm_drm_private *priv = dev->dev_private; + unsigned long timeout_jiffies = timespec_to_jiffies(timeout); + unsigned long start_jiffies = jiffies; + unsigned long remaining_jiffies; + int ret; + + if (time_after(start_jiffies, timeout_jiffies)) + remaining_jiffies = 0; + else + remaining_jiffies = timeout_jiffies - start_jiffies; + + ret = wait_event_interruptible_timeout(priv->fence_event, + priv->completed_fence >= fence, + remaining_jiffies); + if (ret == 0) { + DBG("timeout waiting for fence: %u (completed: %u)", + fence, priv->completed_fence); + ret = -ETIMEDOUT; + } else if (ret != -ERESTARTSYS) { + ret = 0; + } + + return ret; +} + +/* call under struct_mutex */ +void msm_update_fence(struct drm_device *dev, uint32_t fence) +{ + struct msm_drm_private *priv = dev->dev_private; + + if (fence > priv->completed_fence) { + priv->completed_fence = fence; + wake_up_all(&priv->fence_event); + } +} + +/* + * DRM ioctls: + */ + +static int msm_ioctl_get_param(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_param *args = data; + struct msm_gpu *gpu; + + /* for now, we just have 3d pipe.. 
eventually this would need to + * be more clever to dispatch to appropriate gpu module: + */ + if (args->pipe != MSM_PIPE_3D0) + return -EINVAL; + + gpu = priv->gpu; + + if (!gpu) + return -ENXIO; + + return gpu->funcs->get_param(gpu, args->param, &args->value); +} + +static int msm_ioctl_gem_new(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_gem_new *args = data; + return msm_gem_new_handle(dev, file, args->size, + args->flags, &args->handle); +} + +#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec }) + +static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_gem_cpu_prep *args = data; + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout)); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_gem_cpu_fini *args = data; + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = msm_gem_cpu_fini(obj); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int msm_ioctl_gem_info(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_gem_info *args = data; + struct drm_gem_object *obj; + int ret = 0; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + args->offset = msm_gem_mmap_offset(obj); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_wait_fence *args = data; + return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout)); +} + +static const struct drm_ioctl_desc msm_ioctls[] = { + DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH), +}; + static const struct vm_operations_struct vm_ops = { .fault = msm_gem_fault, .open = drm_gem_vm_open, @@ -428,6 +667,7 @@ static struct drm_driver msm_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, .load = msm_load, .unload = msm_unload, + .open = msm_open, .preclose = msm_preclose, .lastclose = msm_lastclose, .irq_handler = msm_irq, @@ -446,6 +686,8 @@ static struct drm_driver msm_driver = { .debugfs_init = msm_debugfs_init, .debugfs_cleanup = msm_debugfs_cleanup, #endif + .ioctls = msm_ioctls, + .num_ioctls = DRM_MSM_NUM_IOCTLS, .fops = &fops, .name = "msm", .desc = "MSM Snapdragon DRM", @@ -514,6 +756,7 @@ static int __init msm_drm_register(void) { DBG("init"); hdmi_register(); + a3xx_register(); return platform_driver_register(&msm_platform_driver); } @@ -522,6 +765,7 @@ static void __exit msm_drm_unregister(void) DBG("fini"); platform_driver_unregister(&msm_platform_driver); hdmi_unregister(); 
+ a3xx_unregister(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 36f8ba2f5c84..34c36b2911d9 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -40,17 +40,34 @@ #include #include #include +#include struct msm_kms; +struct msm_gpu; -#define NUM_DOMAINS 1 /* one for KMS, then one per gpu core (?) */ +#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ + +struct msm_file_private { + /* currently we don't do anything useful with this.. but when + * per-context address spaces are supported we'd keep track of + * the context's page-tables here. + */ + int dummy; +}; struct msm_drm_private { struct msm_kms *kms; + /* when we have more than one 'msm_gpu' these need to be an array: */ + struct msm_gpu *gpu; + struct msm_file_private *lastctx; + struct drm_fb_helper *fbdev; + uint32_t next_fence, completed_fence; + wait_queue_head_t fence_event; + /* list of GEM objects: */ struct list_head inactive_list; @@ -108,6 +125,13 @@ int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu); int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, const char **names, int cnt); +int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, + struct timespec *timeout); +void msm_update_fence(struct drm_device *dev, uint32_t fence); + +int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file); + int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); @@ -125,6 +149,12 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj); void *msm_gem_vaddr(struct drm_gem_object *obj); int msm_gem_queue_inactive_work(struct drm_gem_object *obj, struct work_struct *work); +void msm_gem_move_to_active(struct drm_gem_object *obj, + struct msm_gpu *gpu, uint32_t fence); +void msm_gem_move_to_inactive(struct drm_gem_object *obj); +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, + struct timespec *timeout); +int msm_gem_cpu_fini(struct drm_gem_object *obj); void msm_gem_free_object(struct drm_gem_object *obj); int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, uint32_t size, uint32_t flags, uint32_t *handle); @@ -168,20 +198,14 @@ static inline int align_pitch(int width, int bpp) /* for the generated headers: */ #define INVALID_IDX(idx) ({BUG(); 0;}) +#define fui(x) ({BUG(); 0;}) +#define util_float_to_half(x) ({BUG(); 0;}) + #define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT) /* for conditionally setting boolean flag(s): */ #define COND(bool, val) ((bool) ? (val) : 0) -/* just put these here until we start adding driver private ioctls: */ -// TODO might shuffle these around.. just need something for now.. 
-#define MSM_BO_CACHE_MASK 0x0000000f -#define MSM_BO_SCANOUT 0x00010000 /* scanout capable */ - -#define MSM_BO_CACHED 0x00000001 /* default */ -#define MSM_BO_WC 0x0000002 -#define MSM_BO_UNCACHED 0x00000004 - #endif /* __MSM_DRV_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a52e6cca8403..6b5a6c8c7658 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -20,6 +20,7 @@ #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu.h" /* called with dev->struct_mutex held */ @@ -375,10 +376,74 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj, { struct drm_device *dev = obj->dev; struct msm_drm_private *priv = dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + int ret = 0; + + mutex_lock(&dev->struct_mutex); + if (!list_empty(&work->entry)) { + ret = -EINVAL; + } else if (is_active(msm_obj)) { + list_add_tail(&work->entry, &msm_obj->inactive_work); + } else { + queue_work(priv->wq, work); + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +void msm_gem_move_to_active(struct drm_gem_object *obj, + struct msm_gpu *gpu, uint32_t fence) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + msm_obj->gpu = gpu; + msm_obj->fence = fence; + list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &gpu->active_list); +} + +void msm_gem_move_to_inactive(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + msm_obj->gpu = NULL; + msm_obj->fence = 0; + list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + + while (!list_empty(&msm_obj->inactive_work)) { + struct work_struct *work; + + work = list_first_entry(&msm_obj->inactive_work, + struct work_struct, entry); + + list_del_init(&work->entry); + queue_work(priv->wq, work); + } +} + +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, + struct timespec *timeout) +{ + struct drm_device *dev = obj->dev; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + int ret = 0; + + if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) + ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout); + + /* TODO cache maintenance */ - /* just a place-holder until we have gpu.. */ - queue_work(priv->wq, work); + return ret; +} +int msm_gem_cpu_fini(struct drm_gem_object *obj) +{ + /* TODO cache maintenance */ return 0; } @@ -390,8 +455,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) uint64_t off = drm_vma_node_start(&obj->vma_node); WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - seq_printf(m, "%08x: %2d (%2d) %08llx %p %d\n", - msm_obj->flags, obj->name, obj->refcount.refcount.counter, + seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n", + msm_obj->flags, is_active(msm_obj) ? 
'A' : 'I', + msm_obj->fence, obj->name, obj->refcount.refcount.counter, off, msm_obj->vaddr, obj->size); } @@ -421,6 +487,9 @@ void msm_gem_free_object(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + /* object should not be on active list: */ + WARN_ON(is_active(msm_obj)); + list_del(&msm_obj->mm_list); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { @@ -439,6 +508,9 @@ void msm_gem_free_object(struct drm_gem_object *obj) put_pages(obj); + if (msm_obj->resv == &msm_obj->_resv) + reservation_object_fini(msm_obj->resv); + drm_gem_object_release(obj); kfree(msm_obj); @@ -508,7 +580,11 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, msm_obj->flags = flags; + msm_obj->resv = &msm_obj->_resv; + reservation_object_init(msm_obj->resv); + INIT_LIST_HEAD(&msm_obj->submit_entry); + INIT_LIST_HEAD(&msm_obj->inactive_work); list_add_tail(&msm_obj->mm_list, &priv->inactive_list); return obj; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index fcafd1965151..d746f13d283c 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -18,6 +18,7 @@ #ifndef __MSM_GEM_H__ #define __MSM_GEM_H__ +#include #include "msm_drv.h" struct msm_gem_object { @@ -25,7 +26,27 @@ struct msm_gem_object { uint32_t flags; + /* And object is either: + * inactive - on priv->inactive_list + * active - on one one of the gpu's active_list.. well, at + * least for now we don't have (I don't think) hw sync between + * 2d and 3d one devices which have both, meaning we need to + * block on submit if a bo is already on other ring + * + */ struct list_head mm_list; + struct msm_gpu *gpu; /* non-null if active */ + uint32_t fence; + + /* Transiently in the process of submit ioctl, objects associated + * with the submit are on submit->bo_list.. this only lasts for + * the duration of the ioctl, so one bo can never be on multiple + * submit lists. + */ + struct list_head submit_entry; + + /* work defered until bo is inactive: */ + struct list_head inactive_work; struct page **pages; struct sg_table *sgt; @@ -35,7 +56,44 @@ struct msm_gem_object { // XXX uint32_t iova; } domain[NUM_DOMAINS]; + + /* normally (resv == &_resv) except for imported bo's */ + struct reservation_object *resv; + struct reservation_object _resv; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) +static inline bool is_active(struct msm_gem_object *msm_obj) +{ + return msm_obj->gpu != NULL; +} + +#define MAX_CMDS 4 + +/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, + * associated with the cmdstream submission for synchronization (and + * make it easier to unwind when things go wrong, etc). This only + * lasts for the duration of the submit-ioctl. 
+ */ +struct msm_gem_submit { + struct drm_device *dev; + struct msm_gpu *gpu; + struct list_head bo_list; + struct ww_acquire_ctx ticket; + uint32_t fence; + bool valid; + unsigned int nr_cmds; + unsigned int nr_bos; + struct { + uint32_t type; + uint32_t size; /* in dwords */ + uint32_t iova; + } cmd[MAX_CMDS]; + struct { + uint32_t flags; + struct msm_gem_object *obj; + uint32_t iova; + } bos[0]; +}; + #endif /* __MSM_GEM_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c new file mode 100644 index 000000000000..3e1ef3a00f60 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "msm_drv.h" +#include "msm_gpu.h" +#include "msm_gem.h" + +/* + * Cmdstream submission: + */ + +#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) +/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ +#define BO_VALID 0x8000 +#define BO_LOCKED 0x4000 +#define BO_PINNED 0x2000 + +static inline void __user *to_user_ptr(u64 address) +{ + return (void __user *)(uintptr_t)address; +} + +static struct msm_gem_submit *submit_create(struct drm_device *dev, + struct msm_gpu *gpu, int nr) +{ + struct msm_gem_submit *submit; + int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); + + submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + if (submit) { + submit->dev = dev; + submit->gpu = gpu; + + /* initially, until copy_from_user() and bo lookup succeeds: */ + submit->nr_bos = 0; + submit->nr_cmds = 0; + + INIT_LIST_HEAD(&submit->bo_list); + ww_acquire_init(&submit->ticket, &reservation_ww_class); + } + + return submit; +} + +static int submit_lookup_objects(struct msm_gem_submit *submit, + struct drm_msm_gem_submit *args, struct drm_file *file) +{ + unsigned i; + int ret = 0; + + spin_lock(&file->table_lock); + + for (i = 0; i < args->nr_bos; i++) { + struct drm_msm_gem_submit_bo submit_bo; + struct drm_gem_object *obj; + struct msm_gem_object *msm_obj; + void __user *userptr = + to_user_ptr(args->bos + (i * sizeof(submit_bo))); + + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); + if (ret) { + ret = -EFAULT; + goto out_unlock; + } + + if (submit_bo.flags & BO_INVALID_FLAGS) { + DBG("invalid flags: %x", submit_bo.flags); + ret = -EINVAL; + goto out_unlock; + } + + submit->bos[i].flags = submit_bo.flags; + /* in validate_objects() we figure out if this is true: */ + submit->bos[i].iova = submit_bo.presumed; + + /* normally use drm_gem_object_lookup(), but for bulk lookup + * all under single table_lock just hit object_idr directly: + */ + obj = idr_find(&file->object_idr, submit_bo.handle); + if (!obj) { + DBG("invalid handle %u at index %u", submit_bo.handle, i); + ret = -EINVAL; + goto out_unlock; + } + + msm_obj = to_msm_bo(obj); + + if (!list_empty(&msm_obj->submit_entry)) { + DBG("handle %u at index %u already on submit list", + submit_bo.handle, i); + ret = -EINVAL; + goto 
out_unlock; + } + + drm_gem_object_reference(obj); + + submit->bos[i].obj = msm_obj; + + list_add_tail(&msm_obj->submit_entry, &submit->bo_list); + } + +out_unlock: + submit->nr_bos = i; + spin_unlock(&file->table_lock); + + return ret; +} + +static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) +{ + struct msm_gem_object *msm_obj = submit->bos[i].obj; + + if (submit->bos[i].flags & BO_PINNED) + msm_gem_put_iova(&msm_obj->base, submit->gpu->id); + + if (submit->bos[i].flags & BO_LOCKED) + ww_mutex_unlock(&msm_obj->resv->lock); + + if (!(submit->bos[i].flags & BO_VALID)) + submit->bos[i].iova = 0; + + submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED); +} + +/* This is where we make sure all the bo's are reserved and pin'd: */ +static int submit_validate_objects(struct msm_gem_submit *submit) +{ + int contended, slow_locked = -1, i, ret = 0; + +retry: + submit->valid = true; + + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + uint32_t iova; + + if (slow_locked == i) + slow_locked = -1; + + contended = i; + + if (!(submit->bos[i].flags & BO_LOCKED)) { + ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock, + &submit->ticket); + if (ret) + goto fail; + submit->bos[i].flags |= BO_LOCKED; + } + + + /* if locking succeeded, pin bo: */ + ret = msm_gem_get_iova(&msm_obj->base, + submit->gpu->id, &iova); + + /* this would break the logic in the fail path.. there is no + * reason for this to happen, but just to be on the safe side + * let's notice if this starts happening in the future: + */ + WARN_ON(ret == -EDEADLK); + + if (ret) + goto fail; + + submit->bos[i].flags |= BO_PINNED; + + if (iova == submit->bos[i].iova) { + submit->bos[i].flags |= BO_VALID; + } else { + submit->bos[i].iova = iova; + submit->bos[i].flags &= ~BO_VALID; + submit->valid = false; + } + } + + ww_acquire_done(&submit->ticket); + + return 0; + +fail: + for (; i >= 0; i--) + submit_unlock_unpin_bo(submit, i); + + if (slow_locked > 0) + submit_unlock_unpin_bo(submit, slow_locked); + + if (ret == -EDEADLK) { + struct msm_gem_object *msm_obj = submit->bos[contended].obj; + /* we lost out in a seqno race, lock and retry.. */ + ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock, + &submit->ticket); + if (!ret) { + submit->bos[contended].flags |= BO_LOCKED; + slow_locked = contended; + goto retry; + } + } + + return ret; +} + +static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, + struct msm_gem_object **obj, uint32_t *iova, bool *valid) +{ + if (idx >= submit->nr_bos) { + DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos); + return EINVAL; + } + + if (obj) + *obj = submit->bos[idx].obj; + if (iova) + *iova = submit->bos[idx].iova; + if (valid) + *valid = !!(submit->bos[idx].flags & BO_VALID); + + return 0; +} + +/* process the reloc's and patch up the cmdstream as needed: */ +static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, + uint32_t offset, uint32_t nr_relocs, uint64_t relocs) +{ + uint32_t i, last_offset = 0; + uint32_t *ptr; + int ret; + + if (offset % 4) { + DBG("non-aligned cmdstream buffer: %u", offset); + return -EINVAL; + } + + /* For now, just map the entire thing. Eventually we probably + * to do it page-by-page, w/ kmap() if not vmap()d.. 
+ */ + ptr = msm_gem_vaddr(&obj->base); + + if (IS_ERR(ptr)) { + ret = PTR_ERR(ptr); + DBG("failed to map: %d", ret); + return ret; + } + + for (i = 0; i < nr_relocs; i++) { + struct drm_msm_gem_submit_reloc submit_reloc; + void __user *userptr = + to_user_ptr(relocs + (i * sizeof(submit_reloc))); + uint32_t iova, off; + bool valid; + + ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); + if (ret) + return -EFAULT; + + if (submit_reloc.submit_offset % 4) { + DBG("non-aligned reloc offset: %u", + submit_reloc.submit_offset); + return -EINVAL; + } + + /* offset in dwords: */ + off = submit_reloc.submit_offset / 4; + + if ((off >= (obj->base.size / 4)) || + (off < last_offset)) { + DBG("invalid offset %u at reloc %u", off, i); + return -EINVAL; + } + + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); + if (ret) + return ret; + + if (valid) + continue; + + iova += submit_reloc.reloc_offset; + + if (submit_reloc.shift < 0) + iova >>= -submit_reloc.shift; + else + iova <<= submit_reloc.shift; + + ptr[off] = iova | submit_reloc.or; + + last_offset = off; + } + + return 0; +} + +static void submit_cleanup(struct msm_gem_submit *submit, bool fail) +{ + unsigned i; + + mutex_lock(&submit->dev->struct_mutex); + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + submit_unlock_unpin_bo(submit, i); + list_del_init(&msm_obj->submit_entry); + drm_gem_object_unreference(&msm_obj->base); + } + mutex_unlock(&submit->dev->struct_mutex); + + ww_acquire_fini(&submit->ticket); + kfree(submit); +} + +int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_gem_submit *args = data; + struct msm_file_private *ctx = file->driver_priv; + struct msm_gem_submit *submit; + struct msm_gpu *gpu; + unsigned i; + int ret; + + /* for now, we just have 3d pipe.. 
eventually this would need to + * be more clever to dispatch to appropriate gpu module: + */ + if (args->pipe != MSM_PIPE_3D0) + return -EINVAL; + + gpu = priv->gpu; + + if (args->nr_cmds > MAX_CMDS) + return -EINVAL; + + submit = submit_create(dev, gpu, args->nr_bos); + if (!submit) { + ret = -ENOMEM; + goto out; + } + + ret = submit_lookup_objects(submit, args, file); + if (ret) + goto out; + + ret = submit_validate_objects(submit); + if (ret) + goto out; + + for (i = 0; i < args->nr_cmds; i++) { + struct drm_msm_gem_submit_cmd submit_cmd; + void __user *userptr = + to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + struct msm_gem_object *msm_obj; + uint32_t iova; + + ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); + if (ret) { + ret = -EFAULT; + goto out; + } + + ret = submit_bo(submit, submit_cmd.submit_idx, + &msm_obj, &iova, NULL); + if (ret) + goto out; + + if (submit_cmd.size % 4) { + DBG("non-aligned cmdstream buffer size: %u", + submit_cmd.size); + ret = -EINVAL; + goto out; + } + + if (submit_cmd.size >= msm_obj->base.size) { + DBG("invalid cmdstream size: %u", submit_cmd.size); + ret = -EINVAL; + goto out; + } + + submit->cmd[i].type = submit_cmd.type; + submit->cmd[i].size = submit_cmd.size / 4; + submit->cmd[i].iova = iova + submit_cmd.submit_offset; + + if (submit->valid) + continue; + + ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset, + submit_cmd.nr_relocs, submit_cmd.relocs); + if (ret) + goto out; + } + + submit->nr_cmds = i; + + ret = msm_gpu_submit(gpu, submit, ctx); + + args->fence = submit->fence; + +out: + if (submit) + submit_cleanup(submit, !!ret); + return ret; +} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c new file mode 100644 index 000000000000..7c6541e4a7ec --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -0,0 +1,411 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "msm_gpu.h" +#include "msm_gem.h" + + +/* + * Power Management: + */ + +#ifdef CONFIG_MSM_BUS_SCALING +#include +#include +static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct drm_device *dev = gpu->dev; + struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; + + if (!pdev) { + dev_err(dev->dev, "could not find dtv pdata\n"); + return; + } + + if (pdata->bus_scale_table) { + gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); + DBG("bus scale client: %08x", gpu->bsc); + } +} + +static void bs_fini(struct msm_gpu *gpu) +{ + if (gpu->bsc) { + msm_bus_scale_unregister_client(gpu->bsc); + gpu->bsc = 0; + } +} + +static void bs_set(struct msm_gpu *gpu, int idx) +{ + if (gpu->bsc) { + DBG("set bus scaling: %d", idx); + msm_bus_scale_client_update_request(gpu->bsc, idx); + } +} +#else +static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {} +static void bs_fini(struct msm_gpu *gpu) {} +static void bs_set(struct msm_gpu *gpu, int idx) {} +#endif + +static int enable_pwrrail(struct msm_gpu *gpu) +{ + struct drm_device *dev = gpu->dev; + int ret = 0; + + if (gpu->gpu_reg) { + ret = regulator_enable(gpu->gpu_reg); + if (ret) { + dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret); + return ret; + } + } + + if (gpu->gpu_cx) { + ret = regulator_enable(gpu->gpu_cx); + if (ret) { + dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret); + return ret; + } + } + + return 0; +} + +static int disable_pwrrail(struct msm_gpu *gpu) +{ + if (gpu->gpu_cx) + regulator_disable(gpu->gpu_cx); + if (gpu->gpu_reg) + regulator_disable(gpu->gpu_reg); + return 0; +} + +static int enable_clk(struct msm_gpu *gpu) +{ + struct clk *rate_clk = NULL; + int i; + + /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */ + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { + if (gpu->grp_clks[i]) { + clk_prepare(gpu->grp_clks[i]); + rate_clk = gpu->grp_clks[i]; + } + } + + if (rate_clk && gpu->fast_rate) + clk_set_rate(rate_clk, gpu->fast_rate); + + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + if (gpu->grp_clks[i]) + clk_enable(gpu->grp_clks[i]); + + return 0; +} + +static int disable_clk(struct msm_gpu *gpu) +{ + struct clk *rate_clk = NULL; + int i; + + /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. 
*/ + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { + if (gpu->grp_clks[i]) { + clk_disable(gpu->grp_clks[i]); + rate_clk = gpu->grp_clks[i]; + } + } + + if (rate_clk && gpu->slow_rate) + clk_set_rate(rate_clk, gpu->slow_rate); + + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + if (gpu->grp_clks[i]) + clk_unprepare(gpu->grp_clks[i]); + + return 0; +} + +static int enable_axi(struct msm_gpu *gpu) +{ + if (gpu->ebi1_clk) + clk_prepare_enable(gpu->ebi1_clk); + if (gpu->bus_freq) + bs_set(gpu, gpu->bus_freq); + return 0; +} + +static int disable_axi(struct msm_gpu *gpu) +{ + if (gpu->ebi1_clk) + clk_disable_unprepare(gpu->ebi1_clk); + if (gpu->bus_freq) + bs_set(gpu, 0); + return 0; +} + +int msm_gpu_pm_resume(struct msm_gpu *gpu) +{ + int ret; + + DBG("%s", gpu->name); + + ret = enable_pwrrail(gpu); + if (ret) + return ret; + + ret = enable_clk(gpu); + if (ret) + return ret; + + ret = enable_axi(gpu); + if (ret) + return ret; + + return 0; +} + +int msm_gpu_pm_suspend(struct msm_gpu *gpu) +{ + int ret; + + DBG("%s", gpu->name); + + ret = disable_axi(gpu); + if (ret) + return ret; + + ret = disable_clk(gpu); + if (ret) + return ret; + + ret = disable_pwrrail(gpu); + if (ret) + return ret; + + return 0; +} + +/* + * Cmdstream submission/retirement: + */ + +static void retire_worker(struct work_struct *work) +{ + struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); + struct drm_device *dev = gpu->dev; + uint32_t fence = gpu->funcs->last_fence(gpu); + + mutex_lock(&dev->struct_mutex); + + while (!list_empty(&gpu->active_list)) { + struct msm_gem_object *obj; + + obj = list_first_entry(&gpu->active_list, + struct msm_gem_object, mm_list); + + if (obj->fence <= fence) { + /* move to inactive: */ + msm_gem_move_to_inactive(&obj->base); + msm_gem_put_iova(&obj->base, gpu->id); + drm_gem_object_unreference(&obj->base); + } else { + break; + } + } + + msm_update_fence(gpu->dev, fence); + + mutex_unlock(&dev->struct_mutex); +} + +/* call from irq handler to schedule work to retire bo's */ +void msm_gpu_retire(struct msm_gpu *gpu) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + queue_work(priv->wq, &gpu->retire_work); +} + +/* add bo's to gpu's ring, and kick gpu: */ +int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + int i, ret; + + mutex_lock(&dev->struct_mutex); + + submit->fence = ++priv->next_fence; + + ret = gpu->funcs->submit(gpu, submit, ctx); + priv->lastctx = ctx; + + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + + /* can't happen yet.. 
but when we add 2d support we'll have + * to deal w/ cross-ring synchronization: + */ + WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); + + if (!is_active(msm_obj)) { + uint32_t iova; + + /* ring takes a reference to the bo and iova: */ + drm_gem_object_reference(&msm_obj->base); + msm_gem_get_iova_locked(&msm_obj->base, + submit->gpu->id, &iova); + } + + msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/* + * Init/Cleanup: + */ + +static irqreturn_t irq_handler(int irq, void *data) +{ + struct msm_gpu *gpu = data; + return gpu->funcs->irq(gpu); +} + +static const char *clk_names[] = { + "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", +}; + +int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, + const char *name, const char *ioname, const char *irqname, int ringsz) +{ + int i, ret; + + gpu->dev = drm; + gpu->funcs = funcs; + gpu->name = name; + + INIT_LIST_HEAD(&gpu->active_list); + INIT_WORK(&gpu->retire_work, retire_worker); + + BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks)); + + /* Map registers: */ + gpu->mmio = msm_ioremap(pdev, ioname, name); + if (IS_ERR(gpu->mmio)) { + ret = PTR_ERR(gpu->mmio); + goto fail; + } + + /* Get Interrupt: */ + gpu->irq = platform_get_irq_byname(pdev, irqname); + if (gpu->irq < 0) { + ret = gpu->irq; + dev_err(drm->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, + IRQF_TRIGGER_HIGH, gpu->name, gpu); + if (ret) { + dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); + goto fail; + } + + /* Acquire clocks: */ + for (i = 0; i < ARRAY_SIZE(clk_names); i++) { + gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]); + DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); + if (IS_ERR(gpu->grp_clks[i])) + gpu->grp_clks[i] = NULL; + } + + gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); + DBG("ebi1_clk: %p", gpu->ebi1_clk); + if (IS_ERR(gpu->ebi1_clk)) + gpu->ebi1_clk = NULL; + + /* Acquire regulators: */ + gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd"); + DBG("gpu_reg: %p", gpu->gpu_reg); + if (IS_ERR(gpu->gpu_reg)) + gpu->gpu_reg = NULL; + + gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx"); + DBG("gpu_cx: %p", gpu->gpu_cx); + if (IS_ERR(gpu->gpu_cx)) + gpu->gpu_cx = NULL; + + /* Setup IOMMU.. eventually we will (I think) do this once per context + * and have separate page tables per context. 
For now, to keep things + * simple and to get something working, just use a single address space: + */ + gpu->iommu = iommu_domain_alloc(&platform_bus_type); + if (!gpu->iommu) { + dev_err(drm->dev, "failed to allocate IOMMU\n"); + ret = -ENOMEM; + goto fail; + } + gpu->id = msm_register_iommu(drm, gpu->iommu); + + /* Create ringbuffer: */ + gpu->rb = msm_ringbuffer_new(gpu, ringsz); + if (IS_ERR(gpu->rb)) { + ret = PTR_ERR(gpu->rb); + gpu->rb = NULL; + dev_err(drm->dev, "could not create ringbuffer: %d\n", ret); + goto fail; + } + + ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova); + if (ret) { + gpu->rb_iova = 0; + dev_err(drm->dev, "could not map ringbuffer: %d\n", ret); + goto fail; + } + + bs_init(gpu, pdev); + + return 0; + +fail: + return ret; +} + +void msm_gpu_cleanup(struct msm_gpu *gpu) +{ + DBG("%s", gpu->name); + + WARN_ON(!list_empty(&gpu->active_list)); + + bs_fini(gpu); + + if (gpu->rb) { + if (gpu->rb_iova) + msm_gem_put_iova(gpu->rb->bo, gpu->id); + msm_ringbuffer_destroy(gpu->rb); + } + + if (gpu->iommu) + iommu_domain_free(gpu->iommu); +} diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h new file mode 100644 index 000000000000..8d2cd6c2226b --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __MSM_GPU_H__ +#define __MSM_GPU_H__ + +#include +#include + +#include "msm_drv.h" +#include "msm_ringbuffer.h" + +struct msm_gem_submit; + +/* So far, with hardware that I've seen to date, we can have: + * + zero, one, or two z180 2d cores + * + a3xx or a2xx 3d core, which share a common CP (the firmware + * for the CP seems to implement some different PM4 packet types + * but the basics of cmdstream submission are the same) + * + * Which means that the eventual complete "class" hierarchy, once + * support for all past and present hw is in place, becomes: + * + msm_gpu + * + adreno_gpu + * + a3xx_gpu + * + a2xx_gpu + * + z180_gpu + */ +struct msm_gpu_funcs { + int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value); + int (*hw_init)(struct msm_gpu *gpu); + int (*pm_suspend)(struct msm_gpu *gpu); + int (*pm_resume)(struct msm_gpu *gpu); + int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx); + void (*flush)(struct msm_gpu *gpu); + void (*idle)(struct msm_gpu *gpu); + irqreturn_t (*irq)(struct msm_gpu *irq); + uint32_t (*last_fence)(struct msm_gpu *gpu); + void (*destroy)(struct msm_gpu *gpu); +#ifdef CONFIG_DEBUG_FS + /* show GPU status in debugfs: */ + void (*show)(struct msm_gpu *gpu, struct seq_file *m); +#endif +}; + +struct msm_gpu { + const char *name; + struct drm_device *dev; + const struct msm_gpu_funcs *funcs; + + struct msm_ringbuffer *rb; + uint32_t rb_iova; + + /* list of GEM active objects: */ + struct list_head active_list; + + /* worker for handling active-list retiring: */ + struct work_struct retire_work; + + void __iomem *mmio; + int irq; + + struct iommu_domain *iommu; + int id; + + /* Power Control: */ + struct regulator *gpu_reg, *gpu_cx; + struct clk *ebi1_clk, *grp_clks[5]; + uint32_t fast_rate, slow_rate, bus_freq; + uint32_t bsc; +}; + +static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) +{ + msm_writel(data, gpu->mmio + (reg << 2)); +} + +static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg) +{ + return msm_readl(gpu->mmio + (reg << 2)); +} + +int msm_gpu_pm_suspend(struct msm_gpu *gpu); +int msm_gpu_pm_resume(struct msm_gpu *gpu); + +void msm_gpu_retire(struct msm_gpu *gpu); +int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx); + +int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, + const char *name, const char *ioname, const char *irqname, int ringsz); +void msm_gpu_cleanup(struct msm_gpu *gpu); + +struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); +void __init a3xx_register(void); +void __exit a3xx_unregister(void); + +#endif /* __MSM_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c new file mode 100644 index 000000000000..8171537dd7d1 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "msm_ringbuffer.h" +#include "msm_gpu.h" + +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) +{ + struct msm_ringbuffer *ring; + int ret; + + size = ALIGN(size, 4); /* size should be dword aligned */ + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + ret = -ENOMEM; + goto fail; + } + + ring->gpu = gpu; + ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); + if (IS_ERR(ring->bo)) { + ret = PTR_ERR(ring->bo); + ring->bo = NULL; + goto fail; + } + + ring->start = msm_gem_vaddr_locked(ring->bo); + ring->end = ring->start + (size / 4); + ring->cur = ring->start; + + ring->size = size; + + return ring; + +fail: + if (ring) + msm_ringbuffer_destroy(ring); + return ERR_PTR(ret); +} + +void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) +{ + if (ring->bo) + drm_gem_object_unreference(ring->bo); + kfree(ring); +} diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h new file mode 100644 index 000000000000..6e0e1049fa4f --- /dev/null +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MSM_RINGBUFFER_H__ +#define __MSM_RINGBUFFER_H__ + +#include "msm_drv.h" + +struct msm_ringbuffer { + struct msm_gpu *gpu; + int size; + struct drm_gem_object *bo; + uint32_t *start, *end, *cur; +}; + +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size); +void msm_ringbuffer_destroy(struct msm_ringbuffer *ring); + +/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */ + +static inline void +OUT_RING(struct msm_ringbuffer *ring, uint32_t data) +{ + if (ring->cur == ring->end) + ring->cur = ring->start; + *(ring->cur++) = data; +} + +#endif /* __MSM_RINGBUFFER_H__ */ diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 119487e05e65..2d9a25daab05 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -16,3 +16,4 @@ header-y += sis_drm.h header-y += tegra_drm.h header-y += via_drm.h header-y += vmwgfx_drm.h +header-y += msm_drm.h diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h new file mode 100644 index 000000000000..d3c62074016d --- /dev/null +++ b/include/uapi/drm/msm_drm.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. 
If not, see . + */ + +#ifndef __MSM_DRM_H__ +#define __MSM_DRM_H__ + +#include +#include + +/* Please note that modifications to all structs defined here are + * subject to backwards-compatibility constraints: + * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit + * user/kernel compatibility + * 2) Keep fields aligned to their size + * 3) Because of how drm_ioctl() works, we can add new fields at + * the end of an ioctl if some care is taken: drm_ioctl() will + * zero out the new fields at the tail of the ioctl, so a zero + * value should have a backwards compatible meaning. And for + * output params, userspace won't see the newly added output + * fields.. so that has to be somehow ok. + */ + +#define MSM_PIPE_NONE 0x00 +#define MSM_PIPE_2D0 0x01 +#define MSM_PIPE_2D1 0x02 +#define MSM_PIPE_3D0 0x10 + +/* timeouts are specified in clock-monotonic absolute times (to simplify + * restarting interrupted ioctls). The following struct is logically the + * same as 'struct timespec' but 32/64b ABI safe. + */ +struct drm_msm_timespec { + int64_t tv_sec; /* seconds */ + int64_t tv_nsec; /* nanoseconds */ +}; + +#define MSM_PARAM_GPU_ID 0x01 +#define MSM_PARAM_GMEM_SIZE 0x02 + +struct drm_msm_param { + uint32_t pipe; /* in, MSM_PIPE_x */ + uint32_t param; /* in, MSM_PARAM_x */ + uint64_t value; /* out (get_param) or in (set_param) */ +}; + +/* + * GEM buffers: + */ + +#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */ +#define MSM_BO_GPU_READONLY 0x00000002 +#define MSM_BO_CACHE_MASK 0x000f0000 +/* cache modes */ +#define MSM_BO_CACHED 0x00010000 +#define MSM_BO_WC 0x00020000 +#define MSM_BO_UNCACHED 0x00040000 + +struct drm_msm_gem_new { + uint64_t size; /* in */ + uint32_t flags; /* in, mask of MSM_BO_x */ + uint32_t handle; /* out */ +}; + +struct drm_msm_gem_info { + uint32_t handle; /* in */ + uint32_t pad; + uint64_t offset; /* out, offset to pass to mmap() */ +}; + +#define MSM_PREP_READ 0x01 +#define MSM_PREP_WRITE 0x02 +#define MSM_PREP_NOSYNC 0x04 + +struct drm_msm_gem_cpu_prep { + uint32_t handle; /* in */ + uint32_t op; /* in, mask of MSM_PREP_x */ + struct drm_msm_timespec timeout; /* in */ +}; + +struct drm_msm_gem_cpu_fini { + uint32_t handle; /* in */ +}; + +/* + * Cmdstream Submission: + */ + +/* The value written into the cmdstream is logically: + * + * ((relocbuf->gpuaddr + reloc_offset) << shift) | or + * + * When we have GPU's w/ >32bit ptrs, it should be possible to deal + * with this by emit'ing two reloc entries with appropriate shift + * values. Or a new MSM_SUBMIT_CMD_x type would also be an option. + * + * NOTE that reloc's must be sorted by order of increasing submit_offset, + * otherwise EINVAL. + */ +struct drm_msm_gem_submit_reloc { + uint32_t submit_offset; /* in, offset from submit_bo */ + uint32_t or; /* in, value OR'd with result */ + int32_t shift; /* in, amount of left shift (can be negative) */ + uint32_t reloc_idx; /* in, index of reloc_bo buffer */ + uint64_t reloc_offset; /* in, offset from start of reloc_bo */ +}; + +/* submit-types: + * BUF - this cmd buffer is executed normally. + * IB_TARGET_BUF - this cmd buffer is an IB target. 
Reloc's are + * processed normally, but the kernel does not setup an IB to + * this buffer in the first-level ringbuffer + * CTX_RESTORE_BUF - only executed if there has been a GPU context + * switch since the last SUBMIT ioctl + */ +#define MSM_SUBMIT_CMD_BUF 0x0001 +#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002 +#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003 +struct drm_msm_gem_submit_cmd { + uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */ + uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */ + uint32_t submit_offset; /* in, offset into submit_bo */ + uint32_t size; /* in, cmdstream size */ + uint32_t pad; + uint32_t nr_relocs; /* in, number of submit_reloc's */ + uint64_t __user relocs; /* in, ptr to array of submit_reloc's */ +}; + +/* Each buffer referenced elsewhere in the cmdstream submit (ie. the + * cmdstream buffer(s) themselves or reloc entries) has one (and only + * one) entry in the submit->bos[] table. + * + * As a optimization, the current buffer (gpu virtual address) can be + * passed back through the 'presumed' field. If on a subsequent reloc, + * userspace passes back a 'presumed' address that is still valid, + * then patching the cmdstream for this entry is skipped. This can + * avoid kernel needing to map/access the cmdstream bo in the common + * case. + */ +#define MSM_SUBMIT_BO_READ 0x0001 +#define MSM_SUBMIT_BO_WRITE 0x0002 +struct drm_msm_gem_submit_bo { + uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */ + uint32_t handle; /* in, GEM handle */ + uint64_t presumed; /* in/out, presumed buffer address */ +}; + +/* Each cmdstream submit consists of a table of buffers involved, and + * one or more cmdstream buffers. This allows for conditional execution + * (context-restore), and IB buffers needed for per tile/bin draw cmds. + */ +struct drm_msm_gem_submit { + uint32_t pipe; /* in, MSM_PIPE_x */ + uint32_t fence; /* out */ + uint32_t nr_bos; /* in, number of submit_bo's */ + uint32_t nr_cmds; /* in, number of submit_cmd's */ + uint64_t __user bos; /* in, ptr to array of submit_bo's */ + uint64_t __user cmds; /* in, ptr to array of submit_cmd's */ +}; + +/* The normal way to synchronize with the GPU is just to CPU_PREP on + * a buffer if you need to access it from the CPU (other cmdstream + * submission from same or other contexts, PAGE_FLIP ioctl, etc, all + * handle the required synchronization under the hood). This ioctl + * mainly just exists as a way to implement the gallium pipe_fence + * APIs without requiring a dummy bo to synchronize on. 
+ */ +struct drm_msm_wait_fence { + uint32_t fence; /* in */ + uint32_t pad; + struct drm_msm_timespec timeout; /* in */ +}; + +#define DRM_MSM_GET_PARAM 0x00 +/* placeholder: +#define DRM_MSM_SET_PARAM 0x01 + */ +#define DRM_MSM_GEM_NEW 0x02 +#define DRM_MSM_GEM_INFO 0x03 +#define DRM_MSM_GEM_CPU_PREP 0x04 +#define DRM_MSM_GEM_CPU_FINI 0x05 +#define DRM_MSM_GEM_SUBMIT 0x06 +#define DRM_MSM_WAIT_FENCE 0x07 +#define DRM_MSM_NUM_IOCTLS 0x08 + +#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) +#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) +#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info) +#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep) +#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini) +#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) +#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) + +#endif /* __MSM_DRM_H__ */ -- cgit v1.2.3 From bd6f82d8289422f618b98451a43887f452b3423e Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sat, 24 Aug 2013 14:20:38 -0400 Subject: drm/msm: add basic hangcheck/recovery mechanism A basic, no-frills recovery mechanism in case the gpu gets wedged. We could try to be a bit more fancy and restart the next submit after the one that got wedged, but for now keep it simple. This is enough to recover things if, for example, the gpu hangs mid way through a piglit run. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/adreno_gpu.c | 26 +++++++++++++++-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 3 +- drivers/gpu/drm/msm/msm_gpu.c | 52 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gpu.h | 10 +++++++ 5 files changed, 87 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 13d61bbed302..035bd13dc8bd 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -371,6 +371,7 @@ static const struct adreno_gpu_funcs funcs = { .hw_init = a3xx_hw_init, .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, + .recover = adreno_recover, .last_fence = adreno_last_fence, .submit = adreno_submit, .flush = adreno_flush, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 282163ee3fa5..a60584763b61 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -111,6 +111,28 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu) return adreno_gpu->memptrs->fence; } +void adreno_recover(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct drm_device *dev = gpu->dev; + int ret; + + gpu->funcs->pm_suspend(gpu); + + /* reset ringbuffer: */ + gpu->rb->cur = gpu->rb->start; + + /* reset completed fence seqno, just discard anything pending: */ + adreno_gpu->memptrs->fence = gpu->submitted_fence; + + gpu->funcs->pm_resume(gpu); + ret = gpu->funcs->hw_init(gpu); + if (ret) { + dev_err(dev->dev, "gpu hw init failed: %d\n", ret); + /* hmm, oh well? 
*/ + } +} + int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { @@ -119,8 +141,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_ringbuffer *ring = gpu->rb; unsigned i, ibs = 0; - adreno_gpu->last_fence = submit->fence; - for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: @@ -225,7 +245,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) adreno_gpu->rev.patchid); seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, - adreno_gpu->last_fence); + gpu->submitted_fence); seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 6b49c4f27fec..f73abfba7c22 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -54,8 +54,6 @@ struct adreno_gpu { uint32_t revn; /* numeric revision name */ const struct adreno_gpu_funcs *funcs; - uint32_t last_fence; - /* firmware: */ const struct firmware *pm4, *pfp; @@ -99,6 +97,7 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu) int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); uint32_t adreno_last_fence(struct msm_gpu *gpu); +void adreno_recover(struct msm_gpu *gpu); int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); void adreno_flush(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 7c6541e4a7ec..e1e1ec9321ff 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -202,6 +202,51 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) return 0; } +/* + * Hangcheck detection for locked gpu: + */ + +static void recover_worker(struct work_struct *work) +{ + struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); + struct drm_device *dev = gpu->dev; + + dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); + + mutex_lock(&dev->struct_mutex); + gpu->funcs->recover(gpu); + mutex_unlock(&dev->struct_mutex); + + msm_gpu_retire(gpu); +} + +static void hangcheck_timer_reset(struct msm_gpu *gpu) +{ + DBG("%s", gpu->name); + mod_timer(&gpu->hangcheck_timer, + round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES)); +} + +static void hangcheck_handler(unsigned long data) +{ + struct msm_gpu *gpu = (struct msm_gpu *)data; + uint32_t fence = gpu->funcs->last_fence(gpu); + + if (fence != gpu->hangcheck_fence) { + /* some progress has been made.. ya! */ + gpu->hangcheck_fence = fence; + } else if (fence < gpu->submitted_fence) { + /* no progress and not done.. hung! 
*/ + struct msm_drm_private *priv = gpu->dev->dev_private; + gpu->hangcheck_fence = fence; + queue_work(priv->wq, &gpu->recover_work); + } + + /* if still more pending work, reset the hangcheck timer: */ + if (gpu->submitted_fence > gpu->hangcheck_fence) + hangcheck_timer_reset(gpu); +} + /* * Cmdstream submission/retirement: */ @@ -254,6 +299,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, submit->fence = ++priv->next_fence; + gpu->submitted_fence = submit->fence; + ret = gpu->funcs->submit(gpu, submit, ctx); priv->lastctx = ctx; @@ -276,6 +323,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); } + hangcheck_timer_reset(gpu); mutex_unlock(&dev->struct_mutex); return ret; @@ -307,6 +355,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); + INIT_WORK(&gpu->recover_work, recover_worker); + + setup_timer(&gpu->hangcheck_timer, hangcheck_handler, + (unsigned long)gpu); BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks)); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 8d2cd6c2226b..8cd829e520bb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -51,6 +51,7 @@ struct msm_gpu_funcs { void (*idle)(struct msm_gpu *gpu); irqreturn_t (*irq)(struct msm_gpu *irq); uint32_t (*last_fence)(struct msm_gpu *gpu); + void (*recover)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu); #ifdef CONFIG_DEBUG_FS /* show GPU status in debugfs: */ @@ -69,6 +70,8 @@ struct msm_gpu { /* list of GEM active objects: */ struct list_head active_list; + uint32_t submitted_fence; + /* worker for handling active-list retiring: */ struct work_struct retire_work; @@ -83,6 +86,13 @@ struct msm_gpu { struct clk *ebi1_clk, *grp_clks[5]; uint32_t fast_rate, slow_rate, bus_freq; uint32_t bsc; + + /* Hang Detction: */ +#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ +#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) + struct timer_list hangcheck_timer; + uint32_t hangcheck_fence; + struct work_struct recover_work; }; static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) -- cgit v1.2.3 From 88d7ebe59341dc3b82e662b80809694e3c6b3766 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sun, 25 Aug 2013 18:28:57 +0200 Subject: drm/vma: add access management helpers The VMA offset manager uses a device-global address-space. Hence, any user can currently map any offset-node they want. They only need to guess the right offset. If we wanted per open-file offset spaces, we'd either need VM_NONLINEAR mappings or multiple "struct address_space" trees. As both doesn't really scale, we implement access management in the VMA manager itself. We use an rb-tree to store open-files for each VMA node. On each mmap call, GEM, TTM or the drivers must check whether the current user is allowed to map this file. We add a separate lock for each node as there is no generic lock available for the caller to protect the node easily. As we currently don't know whether an object may be used for mmap(), we have to do access management for all objects. 
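To make the intended call pattern concrete, here is a minimal sketch of how a driver is expected to wire the new helpers into its handle and mmap paths. The object and function names are hypothetical; only drm_vma_node_allow(), drm_vma_node_revoke() and drm_vma_node_is_allowed() come from this patch:

	#include <drm/drmP.h>
	#include <drm/drm_vma_manager.h>

	/* hypothetical driver object embedding an offset node */
	struct my_obj {
		struct drm_vma_offset_node vma_node;
		/* driver-specific fields follow */
	};

	/* handle creation: grant this open-file access to the mmap offset */
	static int my_obj_open(struct my_obj *obj, struct drm_file *file_priv)
	{
		return drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	}

	/* handle destruction: drop the open-file again */
	static void my_obj_close(struct my_obj *obj, struct drm_file *file_priv)
	{
		drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
	}

	/* mmap path: refuse mappings from files that never got a handle */
	static int my_obj_mmap_check(struct my_obj *obj, struct file *filp)
	{
		if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
			return -EACCES;
		return 0;
	}

Note that drm_vma_node_allow() takes the node's lock and may allocate an rb-tree entry, so every handle create/delete now pays a small cost.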
If it turns out to slow down handle creation/deletion significantly, we can optimize it in several ways: - Most times only a single filp is added per bo so we could use a static "struct file *main_filp" which is checked/added/removed first before we fall back to the rbtree+drm_vma_offset_file. This could be even done lockless with rcu. - Let user-space pass a hint whether mmap() should be supported on the bo and avoid access-management if not. - .. there are probably more ideas once we have benchmarks .. v2: add drm_vma_node_verify_access() helper Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 1 + drivers/gpu/drm/drm_vma_manager.c | 155 ++++++++++++++++++++++++++++++++++++++ include/drm/drm_vma_manager.h | 39 +++++++++- 3 files changed, 192 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 1ce88c3301a1..d6122ae6bf86 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -156,6 +156,7 @@ void drm_gem_private_object_init(struct drm_device *dev, kref_init(&obj->refcount); obj->handle_count = 0; obj->size = size; + drm_vma_node_reset(&obj->vma_node); } EXPORT_SYMBOL(drm_gem_private_object_init); diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 3837481d5607..63b471205072 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,13 @@ * must always be page-aligned (as usual). * If you want to get a valid byte-based user-space address for a given offset, * please see drm_vma_node_offset_addr(). + * + * Additionally to offset management, the vma offset manager also handles access + * management. For every open-file context that is allowed to access a given + * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this + * open-file with the offset of the node will fail with -EACCES. To revoke + * access again, use drm_vma_node_revoke(). However, the caller is responsible + * for destroying already existing mappings, if required. */ /** @@ -279,3 +287,150 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, write_unlock(&mgr->vm_lock); } EXPORT_SYMBOL(drm_vma_offset_remove); + +/** + * drm_vma_node_allow - Add open-file to list of allowed users + * @node: Node to modify + * @filp: Open file to add + * + * Add @filp to the list of allowed open-files for this node. If @filp is + * already on this list, the ref-count is incremented. + * + * The list of allowed-users is preserved across drm_vma_offset_add() and + * drm_vma_offset_remove() calls. You may even call it if the node is currently + * not added to any offset-manager. + * + * You must remove all open-files the same number of times as you added them + * before destroying the node. Otherwise, you will leak memory. + * + * This is locked against concurrent access internally. + * + * RETURNS: + * 0 on success, negative error code on internal failure (out-of-mem) + */ +int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp) +{ + struct rb_node **iter; + struct rb_node *parent = NULL; + struct drm_vma_offset_file *new, *entry; + int ret = 0; + + /* Preallocate entry to avoid atomic allocations below. It is quite + * unlikely that an open-file is added twice to a single node so we + * don't optimize for this case. OOM is checked below only if the entry + * is actually used. 
*/ + new = kmalloc(sizeof(*entry), GFP_KERNEL); + + write_lock(&node->vm_lock); + + iter = &node->vm_files.rb_node; + + while (likely(*iter)) { + parent = *iter; + entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); + + if (filp == entry->vm_filp) { + entry->vm_count++; + goto unlock; + } else if (filp > entry->vm_filp) { + iter = &(*iter)->rb_right; + } else { + iter = &(*iter)->rb_left; + } + } + + if (!new) { + ret = -ENOMEM; + goto unlock; + } + + new->vm_filp = filp; + new->vm_count = 1; + rb_link_node(&new->vm_rb, parent, iter); + rb_insert_color(&new->vm_rb, &node->vm_files); + new = NULL; + +unlock: + write_unlock(&node->vm_lock); + kfree(new); + return ret; +} +EXPORT_SYMBOL(drm_vma_node_allow); + +/** + * drm_vma_node_revoke - Remove open-file from list of allowed users + * @node: Node to modify + * @filp: Open file to remove + * + * Decrement the ref-count of @filp in the list of allowed open-files on @node. + * If the ref-count drops to zero, remove @filp from the list. You must call + * this once for every drm_vma_node_allow() on @filp. + * + * This is locked against concurrent access internally. + * + * If @filp is not on the list, nothing is done. + */ +void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp) +{ + struct drm_vma_offset_file *entry; + struct rb_node *iter; + + write_lock(&node->vm_lock); + + iter = node->vm_files.rb_node; + while (likely(iter)) { + entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); + if (filp == entry->vm_filp) { + if (!--entry->vm_count) { + rb_erase(&entry->vm_rb, &node->vm_files); + kfree(entry); + } + break; + } else if (filp > entry->vm_filp) { + iter = iter->rb_right; + } else { + iter = iter->rb_left; + } + } + + write_unlock(&node->vm_lock); +} +EXPORT_SYMBOL(drm_vma_node_revoke); + +/** + * drm_vma_node_is_allowed - Check whether an open-file is granted access + * @node: Node to check + * @filp: Open-file to check for + * + * Search the list in @node whether @filp is currently on the list of allowed + * open-files (see drm_vma_node_allow()). + * + * This is locked against concurrent access internally. 
+ * + * RETURNS: + * true iff @filp is on the list + */ +bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct file *filp) +{ + struct drm_vma_offset_file *entry; + struct rb_node *iter; + + read_lock(&node->vm_lock); + + iter = node->vm_files.rb_node; + while (likely(iter)) { + entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); + if (filp == entry->vm_filp) + break; + else if (filp > entry->vm_filp) + iter = iter->rb_right; + else + iter = iter->rb_left; + } + + read_unlock(&node->vm_lock); + + return iter; +} +EXPORT_SYMBOL(drm_vma_node_is_allowed); diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 22eedac046ac..c18a593d1744 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -24,15 +24,24 @@ */ #include +#include #include #include #include #include #include +struct drm_vma_offset_file { + struct rb_node vm_rb; + struct file *vm_filp; + unsigned long vm_count; +}; + struct drm_vma_offset_node { + rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_node vm_rb; + struct rb_root vm_files; }; struct drm_vma_offset_manager { @@ -56,6 +65,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node); +int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp); +void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp); +bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct file *filp); + /** * drm_vma_offset_exact_lookup() - Look up node by exact address * @mgr: Manager object @@ -122,9 +136,8 @@ static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *m * drm_vma_node_reset() - Initialize or reset node object * @node: Node to initialize or reset * - * Reset a node to its initial state. This must be called if @node isn't - * already cleared (eg., via kzalloc) before using it with any VMA offset - * manager. + * Reset a node to its initial state. This must be called before using it with + * any VMA offset manager. * * This must not be called on an already allocated node, or you will leak * memory. @@ -132,6 +145,8 @@ static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *m static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) { memset(node, 0, sizeof(*node)); + node->vm_files = RB_ROOT; + rwlock_init(&node->vm_lock); } /** @@ -221,4 +236,22 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, drm_vma_node_size(node) << PAGE_SHIFT, 1); } +/** + * drm_vma_node_verify_access() - Access verification helper for TTM + * @node: Offset node + * @filp: Open-file + * + * This checks whether @filp is granted access to @node. It is the same as + * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM + * verify_access() callbacks. + * + * RETURNS: + * 0 if access is granted, -EACCES otherwise. + */ +static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node, + struct file *filp) +{ + return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES; +} + #endif /* __DRM_VMA_MANAGER_H__ */ -- cgit v1.2.3 From ca481c9b2a3ae3598453535b8f0369f1f875d52f Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sun, 25 Aug 2013 18:28:58 +0200 Subject: drm/gem: implement vma access management We implement automatic vma mmap() access management for all drivers using gem_mmap. 
We use the vma manager to add each open-file that creates a gem-handle to the vma-node of the underlying gem object. Once the handle is destroyed, we drop the open-file again. This allows us to use drm_vma_node_is_allowed() on _any_ gem object to see whether an open-file is granted access. In drm_gem_mmap() we use this to verify that unprivileged users cannot guess gem offsets and map arbitrary buffers. Note that this manages access for _all_ gem users (also TTM+GEM), but the actual access checks are only done for drm_gem_mmap(). TTM drivers use the TTM mmap helpers, which need to do that separately. Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d6122ae6bf86..b2d59b2d3acc 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -298,6 +298,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) spin_unlock(&filp->table_lock); drm_gem_remove_prime_handles(obj, filp); + drm_vma_node_revoke(&obj->vma_node, filp->filp); if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, filp); @@ -357,6 +358,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, } *handlep = ret; + ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp); + if (ret) { + drm_gem_handle_delete(file_priv, *handlep); + return ret; + } if (dev->driver->gem_open_object) { ret = dev->driver->gem_open_object(obj, file_priv); @@ -701,6 +707,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_device *dev = obj->dev; drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, file_priv); @@ -793,6 +800,10 @@ EXPORT_SYMBOL(drm_gem_vm_close); * the GEM object is not looked up based on its fake offset. To implement the * DRM mmap operation, drivers should use the drm_gem_mmap() function. * + * drm_gem_mmap_obj() assumes the user is granted access to the buffer while + * drm_gem_mmap() prevents unprivileged users from mapping random objects. So + * callers must verify access restrictions before calling this helper. + * * NOTE: This function has to be protected with dev->struct_mutex * * Return 0 or success or -EINVAL if the object size is smaller than the VMA @@ -841,6 +852,9 @@ EXPORT_SYMBOL(drm_gem_mmap_obj); * Look up the GEM object based on the offset passed in (vma->vm_pgoff will * contain the fake offset we created when the GTT map ioctl was called on * the object) and map it with a call to drm_gem_mmap_obj(). + * + * If the caller is not granted access to the buffer object, the mmap will fail + * with EACCES. Please see the vma manager for more information. */ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { @@ -861,6 +875,9 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) if (!node) { mutex_unlock(&dev->struct_mutex); return drm_mmap(filp, vma); + } else if (!drm_vma_node_is_allowed(node, filp)) { + mutex_unlock(&dev->struct_mutex); + return -EACCES; } obj = container_of(node, struct drm_gem_object, vma_node); -- cgit v1.2.3 From acb4652703f0a452405a3ab9319594eddc41391b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sun, 25 Aug 2013 18:28:59 +0200 Subject: drm: verify vma access in TTM+GEM drivers GEM does already a good job in tracking access to gem buffers via handles and drm_vma access management. 
However, TTM drivers currently do not verify this during mmap(). TTM provides the verify_access() callback to test this. So fix all drivers to actually call into gem+vma to verify access instead of always returning 0. All drivers assume that user-space can only get access to TTM buffers via GEM handles. So whenever the verify_access() callback is called from ttm_bo_mmap(), the buffer must have a valid embedded gem object. This is true for all TTM+GEM drivers. But that's why this patch doesn't touch pure TTM drivers (ie, vmwgfx). v2: Switch to drm_vma_node_verify_access() to correctly return -EACCES if access was denied. Cc: Dave Airlie Cc: Alex Deucher Cc: Ben Skeggs Cc: Maarten Lankhorst Cc: Jerome Glisse Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_ttm.c | 4 +++- drivers/gpu/drm/cirrus/cirrus_ttm.c | 4 +++- drivers/gpu/drm/mgag200/mgag200_ttm.c | 4 +++- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +++- drivers/gpu/drm/qxl/qxl_ttm.c | 4 +++- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +++- 6 files changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index cf1c833f73ca..20fcf4ee3af0 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct ast_bo *astbo = ast_bo(bo); + + return drm_vma_node_verify_access(&astbo->gem.vma_node, filp); } static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index bf8a50669489..ae2385cc71cb 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct cirrus_bo *cirrusbo = cirrus_bo(bo); + + return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp); } static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 6cf3fa0b35cc..fd4539d9ad2c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct mgag200_bo *mgabo = mgag200_bo(bo); + + return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp); } static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4e7ee5f4155c..e4444bacd0b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1260,7 +1260,9 @@ out: static int nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct nouveau_bo *nvbo = nouveau_bo(bo); + + return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp); } static int diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 1dfd84cda2a1..037786d7c1dc 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -212,7 +212,9 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, 
static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct qxl_bo *qbo = to_qxl_bo(bo); + + return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp); } static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 6c0ce8915fac..71245d6f34a2 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); + + return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); } static void radeon_move_null(struct ttm_buffer_object *bo, -- cgit v1.2.3 From ccaddfe1a2e10f50aa6f553f9791c2724b6d3c4a Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Tue, 30 Jul 2013 11:35:03 +0300 Subject: drm/tegra: hdmi: Make sure clock is enabled before dumping registers The debugfs register dumping function did not enable the HDMI clock. This led to a possible system hang when reading the debugfs entry while no HDMI cable was connected to the system. This patch makes sure that the clock is enabled during the read. Signed-off-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/gpu/host1x/drm/hdmi.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c index 01097da09f7f..bf7f02743419 100644 --- a/drivers/gpu/host1x/drm/hdmi.c +++ b/drivers/gpu/host1x/drm/hdmi.c @@ -904,6 +904,11 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_hdmi *hdmi = node->info_ent->data; + int err; + + err = clk_enable(hdmi->clk); + if (err) + return err; #define DUMP_REG(name) \ seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ @@ -1069,6 +1074,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) #undef DUMP_REG + clk_disable(hdmi->clk); + return 0; } -- cgit v1.2.3 From f5fda676e9a3991aab159418f870351bc7d45d96 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Aug 2013 13:18:25 +0300 Subject: gpu: host1x: fix an integer overflow check Tegra is a 32 bit arch. On 32 bit systems then size_t is 32 bits so "total" will never be higher than UINT_MAX because of integer overflows. We need cast to u64 first before doing the math. Also the addition earlier: unsigned int num_unpins = num_cmdbufs + num_relocs; That can overflow as well, but I think it's still safe because we check both "num_cmdbufs" and "num_relocs" again in this test. 
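(Illustration only, not part of the patch: a stand-alone userspace C sketch of the failure mode described above. The count and element size are made up; on a 32-bit build the unsigned product wraps before the comparison against ULONG_MAX, while promoting one operand to u64 preserves the full value so the guard can actually fire.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_unpins = 0x20000000;	/* hypothetical user-supplied count */
	uint32_t elem_size  = 16;		/* stand-in for sizeof(struct ...) */

	uint32_t wrapped = num_unpins * elem_size;		/* 32-bit math: wraps to 0 */
	uint64_t widened = (uint64_t)num_unpins * elem_size;	/* 64-bit math: 0x200000000 */

	printf("32-bit product: %u\n", wrapped);		/* prints 0, so the size check passes */
	printf("64-bit product: %llu\n", (unsigned long long)widened);
	return 0;
}

With the casts in place, each product and the running sum are evaluated in 64 bits, so on a 32-bit Tegra build an oversized request can exceed ULONG_MAX and be rejected instead of silently wrapping.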
Signed-off-by: Dan Carpenter Signed-off-by: Thierry Reding --- drivers/gpu/host1x/job.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index cc807667d8f1..18a47f95e90c 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -42,12 +42,12 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, /* Check that we're not going to overflow */ total = sizeof(struct host1x_job) + - num_relocs * sizeof(struct host1x_reloc) + - num_unpins * sizeof(struct host1x_job_unpin_data) + - num_waitchks * sizeof(struct host1x_waitchk) + - num_cmdbufs * sizeof(struct host1x_job_gather) + - num_unpins * sizeof(dma_addr_t) + - num_unpins * sizeof(u32 *); + (u64)num_relocs * sizeof(struct host1x_reloc) + + (u64)num_unpins * sizeof(struct host1x_job_unpin_data) + + (u64)num_waitchks * sizeof(struct host1x_waitchk) + + (u64)num_cmdbufs * sizeof(struct host1x_job_gather) + + (u64)num_unpins * sizeof(dma_addr_t) + + (u64)num_unpins * sizeof(u32 *); if (total > ULONG_MAX) return NULL; -- cgit v1.2.3 From 745cecc07cee878a5afdda40d13f8b0901a88ebd Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Aug 2013 13:19:11 +0300 Subject: gpu: host1x: returning success instead of -ENOMEM There is a mistake here so it returns PTR_ERR(NULL) which is success instead of -ENOMEM. Signed-off-by: Dan Carpenter Signed-off-by: Thierry Reding --- drivers/gpu/host1x/job.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 18a47f95e90c..c4e1050f2252 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -466,9 +466,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) { - int err = PTR_ERR(job->gather_copy_mapped); job->gather_copy_mapped = NULL; - return err; + return -ENOMEM; } job->gather_copy_size = size; -- cgit v1.2.3 From 0d69704ae348c03bc216b01e32a0e9a2372be419 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 10 Sep 2012 12:28:36 +1000 Subject: gpu/vga_switcheroo: add driver control power feature. (v3) For optimus and powerxpress muxless we really want the GPU driver deciding when to power up/down the GPU, not userspace. This adds the ability for a driver to dynamically power up/down the GPU and remove the switcheroo from controlling it, the switcheroo reports the dynamic state to userspace also. It also adds 2 power domains, one for machine where the power switch is controlled outside the GPU D3 state, so the powerdown ordering is done correctly, and the second for the hdmi audio device to make sure it can resume for PCI config space accesses. v1.1: fix build with switcheroo off v2: add power domain support for radeon and v1 nvidia dsms v2.1: fix typo in off case v3: add audio power domain for hdmi audio + misc audio fixes v4: use PCI_SLOT macro, drop power reference on hdmi audio resume failure also. 
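(For orientation, a rough sketch of how a GPU driver could opt in to the interface added here; my_switcheroo_ops, my_pm_domain and the probe context are placeholders, not code from this patch.)

#include <drm/drmP.h>
#include <linux/vga_switcheroo.h>

static struct dev_pm_domain my_pm_domain;	/* hypothetical; one per device in practice */

static const struct vga_switcheroo_client_ops my_switcheroo_ops = {
	/* the driver's usual callbacks (set_gpu_state, etc.) go here */
};

static int my_gpu_register_switcheroo(struct drm_device *dev)
{
	int ret;

	/* driver_power_control = true: switcheroo still reports state in
	 * debugfs ("Dyn"), but leaves the actual power transitions to the
	 * driver's runtime PM instead of the manual ON/OFF path. */
	ret = vga_switcheroo_register_client(dev->pdev, &my_switcheroo_ops, true);
	if (ret)
		return ret;

	/* Only needed on machines where the power switch is separate from
	 * the GPU's own D3 handling: wrap the bus PM ops so the switch is
	 * sequenced around runtime suspend/resume. */
	return vga_switcheroo_init_domain_pm_ops(dev->dev, &my_pm_domain);
}

A driver using this would then call vga_switcheroo_set_dynamic_switch() with VGA_SWITCHEROO_OFF/ON from its own runtime suspend/resume hooks, as the nouveau conversion later in this series does.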
Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/nouveau/nouveau_vga.c | 2 +- drivers/gpu/drm/radeon/radeon_device.c | 2 +- drivers/gpu/vga/vga_switcheroo.c | 147 +++++++++++++++++++++++++++++++-- include/linux/vga_switcheroo.h | 13 ++- 5 files changed, 156 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0adfe4000871..54f86242e80e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1293,7 +1293,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_register_dsm_handler(); - ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); if (ret) goto cleanup_vga_client; diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 25d3495725eb..40a09f11a600 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -79,7 +79,7 @@ nouveau_vga_init(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); - vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); + vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, false); } void diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..0610ca4fb6a3 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1269,7 +1269,7 @@ int radeon_device_init(struct radeon_device *rdev, /* this will fail for cards that aren't VGA class devices, just * ignore it */ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); - vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); + vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false); r = radeon_init(rdev); if (r) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cf787e1d9322..ec0ae2d1686a 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -37,6 +38,7 @@ struct vga_switcheroo_client { const struct vga_switcheroo_client_ops *ops; int id; bool active; + bool driver_power_control; struct list_head list; }; @@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler); static int register_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - int id, bool active) + int id, bool active, bool driver_power_control) { struct vga_switcheroo_client *client; @@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev, client->ops = ops; client->id = id; client->active = active; + client->driver_power_control = driver_power_control; mutex_lock(&vgasr_mutex); list_add_tail(&client->list, &vgasr_priv.clients); @@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev, } int vga_switcheroo_register_client(struct pci_dev *pdev, - const struct vga_switcheroo_client_ops *ops) + const struct vga_switcheroo_client_ops *ops, + bool driver_power_control) { return register_client(pdev, ops, -1, - pdev == vga_default_device()); + pdev == vga_default_device(), driver_power_control); } EXPORT_SYMBOL(vga_switcheroo_register_client); @@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, int id, bool active) { - return 
register_client(pdev, ops, id | ID_BIT_AUDIO, active); + return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false); } EXPORT_SYMBOL(vga_switcheroo_register_audio_client); @@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v) int i = 0; mutex_lock(&vgasr_mutex); list_for_each_entry(client, &vgasr_priv.clients, list) { - seq_printf(m, "%d:%s%s:%c:%s:%s\n", i, + seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i, client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", client_is_vga(client) ? "" : "-Audio", client->active ? '+' : ' ', + client->driver_power_control ? "Dyn" : "", client->pwr_state ? "Pwr" : "Off", pci_name(client->pdev)); i++; @@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file) static int vga_switchon(struct vga_switcheroo_client *client) { + if (client->driver_power_control) + return 0; if (vgasr_priv.handler->power_state) vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); /* call the driver callback to turn on device */ @@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client) static int vga_switchoff(struct vga_switcheroo_client *client) { + if (client->driver_power_control) + return 0; /* call the driver callback to turn off device */ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); if (vgasr_priv.handler->power_state) @@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, list_for_each_entry(client, &vgasr_priv.clients, list) { if (client->active || client_is_audio(client)) continue; + if (client->driver_power_control) + continue; set_audio_state(client->id, VGA_SWITCHEROO_OFF); if (client->pwr_state == VGA_SWITCHEROO_ON) vga_switchoff(client); @@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, list_for_each_entry(client, &vgasr_priv.clients, list) { if (client->active || client_is_audio(client)) continue; + if (client->driver_power_control) + continue; if (client->pwr_state == VGA_SWITCHEROO_OFF) vga_switchon(client); set_audio_state(client->id, VGA_SWITCHEROO_ON); @@ -565,3 +578,127 @@ err: return err; } EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + +static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct vga_switcheroo_client *client; + + if (!vgasr_priv.handler->power_state) + return; + + client = find_client_from_pci(&vgasr_priv.clients, pdev); + if (!client) + return; + + if (!client->driver_power_control) + return; + + vgasr_priv.handler->power_state(client->id, state); +} + +/* force a PCI device to a certain state - mainly to turn off audio clients */ + +void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) +{ + struct vga_switcheroo_client *client; + + client = find_client_from_pci(&vgasr_priv.clients, pdev); + if (!client) + return; + + if (!client->driver_power_control) + return; + + client->pwr_state = dynamic; + set_audio_state(client->id, dynamic); +} +EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch); + +/* switcheroo power domain */ +static int vga_switcheroo_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = dev->bus->pm->runtime_suspend(dev); + if (ret) + return ret; + + vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); + return 0; +} + +static int vga_switcheroo_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + vga_switcheroo_power_switch(pdev, 
VGA_SWITCHEROO_ON); + ret = dev->bus->pm->runtime_resume(dev); + if (ret) + return ret; + + return 0; +} + +/* this version is for the case where the power switch is separate + to the device being powered down. */ +int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) +{ + /* copy over all the bus versions */ + if (dev->bus && dev->bus->pm) { + domain->ops = *dev->bus->pm; + domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend; + domain->ops.runtime_resume = vga_switcheroo_runtime_resume; + + dev->pm_domain = domain; + return 0; + } + dev->pm_domain = NULL; + return -EINVAL; +} +EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops); + +static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + struct vga_switcheroo_client *client, *found = NULL; + + /* we need to check if we have to switch back on the video + device so the audio device can come back */ + list_for_each_entry(client, &vgasr_priv.clients, list) { + if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) { + found = client; + ret = pm_runtime_get_sync(&client->pdev->dev); + if (ret) { + if (ret != 1) + return ret; + } + break; + } + } + ret = dev->bus->pm->runtime_resume(dev); + + /* put the reference for the gpu */ + if (found) { + pm_runtime_mark_last_busy(&found->pdev->dev); + pm_runtime_put_autosuspend(&found->pdev->dev); + } + return ret; +} + +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) +{ + /* copy over all the bus versions */ + if (dev->bus && dev->bus->pm) { + domain->ops = *dev->bus->pm; + domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio; + + dev->pm_domain = domain; + return 0; + } + dev->pm_domain = NULL; + return -EINVAL; +} +EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index ddb419cf4530..502073a53dd3 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -45,7 +45,8 @@ struct vga_switcheroo_client_ops { #if defined(CONFIG_VGA_SWITCHEROO) void vga_switcheroo_unregister_client(struct pci_dev *dev); int vga_switcheroo_register_client(struct pci_dev *dev, - const struct vga_switcheroo_client_ops *ops); + const struct vga_switcheroo_client_ops *ops, + bool driver_power_control); int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, int id, bool active); @@ -60,11 +61,15 @@ int vga_switcheroo_process_delayed_switch(void); int vga_switcheroo_get_client_state(struct pci_dev *dev); +void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); + +int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, - const struct vga_switcheroo_client_ops *ops) { return 0; } + const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev 
*pdev, @@ -74,6 +79,10 @@ static inline void vga_switcheroo_unregister_handler(void) {} static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } +static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} + +static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } +static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } #endif #endif /* _LINUX_VGA_SWITCHEROO_H_ */ -- cgit v1.2.3 From 246efa4a072f3a2e03010ef0b78b0974ec69c377 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 29 Jul 2013 15:19:29 +1000 Subject: snd/hda: add runtime suspend/resume on optimus support (v4) Add support for HDMI audio device on VGA cards that powerdown to D3cold using non-standard ACPI/PCI infrastructure (optimus). This does a couple of things to make it work: a) add a set of power ops for the hdmi domain, and enables them via vga_switcheroo when we are a switcheroo controlled card. This just replaces the runtime resume operation so that when the card is in D3cold the userspace pci config space access via sysfs, the vga switcheroon runtime resume gets called first and it calls the GPU resume callback before calling the sound card runtime resume. b) standard ACPI/PCI stacks won't put a device into D3cold without an ACPI handle, but since the hdmi audio devices on gpus don't have an ACPI handle, we need to manually force the device into D3cold after suspend from the switcheroo path only. c) don't try and do runtime s/r when the GPU is off. d) call runtime suspend/resume during switcheroo suspend/resume this is to make sure the runtime stack knows to try and resume the hdmi audio device for pci config space access. v2: fix incorrect runtime call suspend->resume. v3: rework irq handler to avoid false irq when we are resuming but haven't runtime resumed yet, don't bother trying D3cold, it won't work, just set it manually ourselves, move runtime s/r calls outside the main s/r hook. enable dnyamic pm properly by dropping reference. 
v4: put back irq handler check just wrap it with cap check Acked-by: Takashi Iwai Signed-off-by: Dave Airlie --- sound/pci/hda/hda_intel.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8860dd529520..bf5e58ec1efe 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -555,6 +555,9 @@ struct azx { #ifdef CONFIG_SND_HDA_DSP_LOADER struct azx_dev saved_azx_dev; #endif + + /* secondary power domain for hdmi audio under vga device */ + struct dev_pm_domain hdmi_pm_domain; }; #define CREATE_TRACE_POINTS @@ -1397,8 +1400,9 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id) int i, ok; #ifdef CONFIG_PM_RUNTIME - if (chip->pci->dev.power.runtime_status != RPM_ACTIVE) - return IRQ_NONE; + if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME) + if (chip->pci->dev.power.runtime_status != RPM_ACTIVE) + return IRQ_NONE; #endif spin_lock(&chip->reg_lock); @@ -1409,7 +1413,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id) } status = azx_readl(chip, INTSTS); - if (status == 0) { + if (status == 0 || status == 0xffffffff) { spin_unlock(&chip->reg_lock); return IRQ_NONE; } @@ -2971,6 +2975,12 @@ static int azx_runtime_suspend(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; + if (chip->disabled) + return 0; + + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) + return 0; + azx_stop_chip(chip); azx_enter_link_reset(chip); azx_clear_irq_pending(chip); @@ -2984,6 +2994,12 @@ static int azx_runtime_resume(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; + if (chip->disabled) + return 0; + + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) + return 0; + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) hda_display_power(true); azx_init_pci(chip); @@ -2996,6 +3012,9 @@ static int azx_runtime_idle(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; + if (chip->disabled) + return 0; + if (!power_save_controller || !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) return -EBUSY; @@ -3078,13 +3097,19 @@ static void azx_vs_set_state(struct pci_dev *pci, "%s: %s via VGA-switcheroo\n", pci_name(chip->pci), disabled ? 
"Disabling" : "Enabling"); if (disabled) { + pm_runtime_put_sync_suspend(&pci->dev); azx_suspend(&pci->dev); + /* when we get suspended by vga switcheroo we end up in D3cold, + * however we have no ACPI handle, so pci/acpi can't put us there, + * put ourselves there */ + pci->current_state = PCI_D3cold; chip->disabled = true; if (snd_hda_lock_devices(chip->bus)) snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n", pci_name(chip->pci)); } else { snd_hda_unlock_devices(chip->bus); + pm_runtime_get_noresume(&pci->dev); chip->disabled = false; azx_resume(&pci->dev); } @@ -3139,6 +3164,9 @@ static int register_vga_switcheroo(struct azx *chip) if (err < 0) return err; chip->vga_switcheroo_registered = 1; + + /* register as an optimus hdmi audio power domain */ + vga_switcheroo_init_domain_pm_optimus_hdmi_audio(&chip->pci->dev, &chip->hdmi_pm_domain); return 0; } #else @@ -3887,7 +3915,7 @@ static int azx_probe_continue(struct azx *chip) power_down_all_codecs(chip); azx_notifier_register(chip); azx_add_card_list(chip); - if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME) + if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo) pm_runtime_put_noidle(&pci->dev); return 0; -- cgit v1.2.3 From 13bb9cc8726c716a7f271fc2c760ba15e1fdd38c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 12 Sep 2012 15:55:05 +1000 Subject: drm: allow open of dynamic off devices. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 2 +- include/drm/drmP.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 2d2401e9c5ae..136c949307ba 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -219,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, return -EBUSY; /* No exclusive opens */ if (!drm_cpu_valid()) return -EINVAL; - if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 90833dccc919..0e3d51793b65 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1220,6 +1220,7 @@ struct drm_device { #define DRM_SWITCH_POWER_ON 0 #define DRM_SWITCH_POWER_OFF 1 #define DRM_SWITCH_POWER_CHANGING 2 +#define DRM_SWITCH_POWER_DYNAMIC_OFF 3 static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) -- cgit v1.2.3 From 5addcf0a5f0fadceba6bd562d0616a1c5d4c1a4d Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 10 Sep 2012 14:20:51 +1000 Subject: nouveau: add runtime PM support (v0.9) This hooks nouveau up to the runtime PM system to enable dynamic power management for secondary GPUs in switchable and optimus laptops. a) rewrite suspend/resume printks to hide them during dynamic s/r to avoid cluttering logs b) add runtime pm suspend to irq handler, crtc display, ioctl handler, connector status, c) handle hdmi audio dynamic power on/off using magic register. v0.5: make sure we hit D3 properly fix fbdev_set_suspend locking interaction, we only will poweroff if we have no active crtcs/fbcon anyways. add reference for active crtcs. sprinkle mark last busy for autosuspend timeout v0.6: allow more flexible debugging - to avoid log spam add option to enable/disable dynpm got to D3Cold v0.7: add hdmi audio support. 
v0.8: call autosuspend from idle, so pci config space access doesn't go straight back to sleep, this makes starting X faster. only signal usage if we actually handle the irq, otherwise usb keeps us awake. fix nv50 display active powerdown v0.9: use masking function to enable hdmi audio set busy when we fail to suspend Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/core/core/printk.c | 19 ++ drivers/gpu/drm/nouveau/core/include/core/printk.h | 13 ++ drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2 +- drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 6 + drivers/gpu/drm/nouveau/dispnv04/crtc.c | 49 +++- drivers/gpu/drm/nouveau/nouveau_acpi.c | 42 +++- drivers/gpu/drm/nouveau/nouveau_connector.c | 27 ++- drivers/gpu/drm/nouveau/nouveau_display.c | 12 +- drivers/gpu/drm/nouveau/nouveau_display.h | 2 + drivers/gpu/drm/nouveau/nouveau_drm.c | 250 +++++++++++++++++++-- drivers/gpu/drm/nouveau/nouveau_drm.h | 9 + drivers/gpu/drm/nouveau/nouveau_vga.c | 14 +- drivers/gpu/drm/nouveau/nv50_display.c | 2 +- 13 files changed, 404 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c index 6161eaf5447c..52fb2aa129e8 100644 --- a/drivers/gpu/drm/nouveau/core/core/printk.c +++ b/drivers/gpu/drm/nouveau/core/core/printk.c @@ -27,6 +27,8 @@ #include #include +int nv_printk_suspend_level = NV_DBG_DEBUG; + void nv_printk_(struct nouveau_object *object, const char *pfx, int level, const char *fmt, ...) @@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level, vprintk(mfmt, args); va_end(args); } + +#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x + +const char *nv_printk_level_to_pfx(int level) +{ + switch (level) { + CONV_LEVEL(FATAL); + CONV_LEVEL(ERROR); + CONV_LEVEL(WARN); + CONV_LEVEL(INFO); + CONV_LEVEL(DEBUG); + CONV_LEVEL(PARANOIA); + CONV_LEVEL(TRACE); + CONV_LEVEL(SPAM); + } + return NV_PRINTK_DEBUG; +} diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h index febed2ea5c80..d87836e3a704 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/printk.h +++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h @@ -15,6 +15,12 @@ struct nouveau_object; #define NV_PRINTK_TRACE KERN_DEBUG #define NV_PRINTK_SPAM KERN_DEBUG +extern int nv_printk_suspend_level; + +#define NV_DBG_SUSPEND (nv_printk_suspend_level) +#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level)) + +const char *nv_printk_level_to_pfx(int level); void __printf(4, 5) nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); @@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) +#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a) + +static inline void nv_suspend_set_printk_level(int level) +{ + nv_printk_suspend_level = level; +} + #define nv_assert(f,a...) 
do { \ if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index 0687e6481438..2e11ea02cf87 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute) u16 data; if (execute) - nv_info(bios, "running init tables\n"); + nv_suspend(bios, "running init tables\n"); while (!ret && (data = (init_script(bios, ++i)))) { struct nvbios_init init = { .subdev = subdev, diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a4..2e7c5fd3de3d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -23,16 +23,20 @@ */ #include +#include static irqreturn_t nouveau_mc_intr(int irq, void *arg) { struct nouveau_mc *pmc = arg; const struct nouveau_mc_intr *map = pmc->intr_map; + struct nouveau_device *device = nv_device(pmc); struct nouveau_subdev *unit; u32 stat, intr; intr = stat = nv_rd32(pmc, 0x000100); + if (intr == 0xffffffff) + return IRQ_NONE; while (stat && map->stat) { if (stat & map->stat) { unit = nouveau_subdev(pmc, map->unit); @@ -47,6 +51,8 @@ nouveau_mc_intr(int irq, void *arg) nv_error(pmc, "unknown intr 0x%08x\n", stat); } + if (stat == IRQ_HANDLED) + pm_runtime_mark_last_busy(&device->pdev->dev); return stat ? IRQ_HANDLED : IRQ_NONE; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e04..6413552df21c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -22,6 +22,7 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ +#include #include #include @@ -1007,13 +1008,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } +int +nouveau_crtc_set_config(struct drm_mode_set *set) +{ + struct drm_device *dev; + struct nouveau_drm *drm; + int ret; + struct drm_crtc *crtc; + bool active = false; + if (!set || !set->crtc) + return -EINVAL; + + dev = set->crtc->dev; + + /* get a pm reference here */ + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) + return ret; + + ret = drm_crtc_helper_set_config(set); + + drm = nouveau_drm(dev); + + /* if we get here with no crtcs active then we can drop a reference */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->enabled) + active = true; + } + + pm_runtime_mark_last_busy(dev->dev); + /* if we have active crtcs and we don't have a power ref, + take the current one */ + if (active && !drm->have_disp_power_ref) { + drm->have_disp_power_ref = true; + return ret; + } + /* if we have no active crtcs, then drop the power ref + we got before */ + if (!active && drm->have_disp_power_ref) { + pm_runtime_put_autosuspend(dev->dev); + drm->have_disp_power_ref = false; + } + /* drop the power reference we got coming in here */ + pm_runtime_put_autosuspend(dev->dev); + return ret; +} + static const struct drm_crtc_funcs nv04_crtc_funcs = { .save = nv_crtc_save, .restore = nv_crtc_restore, .cursor_set = nv04_crtc_cursor_set, .cursor_move = nv04_crtc_cursor_move, .gamma_set = nv_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, + .set_config = nouveau_crtc_set_config, .page_flip = nouveau_crtc_page_flip, .destroy = nv_crtc_destroy, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index d97f20069d3e..dd7d2e182719 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -25,8 +25,27 @@ #define NOUVEAU_DSM_POWER_SPEED 0x01 #define NOUVEAU_DSM_POWER_STAMINA 0x02 -#define NOUVEAU_DSM_OPTIMUS_FN 0x1A -#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 +#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A +#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B + +#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24) +#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24) +#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1) + +#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED) + +/* result of the optimus caps function */ +#define OPTIMUS_ENABLED (1 << 0) +#define OPTIMUS_STATUS_MASK (3 << 3) +#define OPTIMUS_STATUS_OFF (0 << 3) +#define OPTIMUS_STATUS_ON_ENABLED (1 << 3) +#define OPTIMUS_STATUS_PWR_STABLE (3 << 3) +#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6) +#define OPTIMUS_CAPS_MASK (7 << 24) +#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24) + +#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27) +#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */ static struct nouveau_dsm_priv { bool dsm_detected; @@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) retval |= NOUVEAU_DSM_HAS_MUX; if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, - NOUVEAU_DSM_OPTIMUS_FN)) + NOUVEAU_DSM_OPTIMUS_CAPS)) retval |= NOUVEAU_DSM_HAS_OPT; + if (retval & NOUVEAU_DSM_HAS_OPT) { + uint32_t result; + nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, + &result); + dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n", + (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", + (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", + (result & OPTIMUS_HDA_CODEC_MASK) ? 
"hda bios codec supported" : ""); + } if (retval) nouveau_dsm_priv.dhandle = dhandle; @@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void) if (!nouveau_dsm_priv.optimus_detected) return; - nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, - NOUVEAU_DSM_OPTIMUS_ARGS, &result); + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, + 0x3, &result); + + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, + NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); + } void nouveau_unregister_dsm_handler(void) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 4da776f344d7..c5b36f9e9a10 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -26,6 +26,8 @@ #include +#include + #include #include #include @@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) struct nouveau_encoder *nv_partner; struct nouveau_i2c_port *i2c; int type; + int ret; + enum drm_connector_status conn_status = connector_status_disconnected; /* Cleanup the previous EDID block. */ if (nv_connector->edid) { @@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } + ret = pm_runtime_get_sync(connector->dev->dev); + if (ret < 0) + return conn_status; + i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); if (i2c) { nv_connector->edid = drm_get_edid(connector, &i2c->adapter); @@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { NV_ERROR(drm, "Detected %s, but failed init\n", drm_get_connector_name(connector)); - return connector_status_disconnected; + conn_status = connector_status_disconnected; + goto out; } /* Override encoder type for DVI-I based on whether EDID @@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) } nouveau_connector_set_encoder(connector, nv_encoder); - return connector_status_connected; + conn_status = connector_status_connected; + goto out; } nv_encoder = nouveau_connector_of_detect(connector); if (nv_encoder) { nouveau_connector_set_encoder(connector, nv_encoder); - return connector_status_connected; + conn_status = connector_status_connected; + goto out; } detect_analog: @@ -311,12 +322,18 @@ detect_analog: if (helper->detect(encoder, connector) == connector_status_connected) { nouveau_connector_set_encoder(connector, nv_encoder); - return connector_status_connected; + conn_status = connector_status_connected; + goto out; } } - return connector_status_disconnected; + out: + + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + + return conn_status; } static enum drm_connector_status diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 78637afb9b94..dbcf10681ab2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -394,7 +394,7 @@ nouveau_display_suspend(struct drm_device *dev) nouveau_display_fini(dev); - NV_INFO(drm, "unpinning framebuffer(s)...\n"); + NV_SUSPEND(drm, "unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -416,7 +416,7 @@ nouveau_display_suspend(struct drm_device *dev) } void -nouveau_display_resume(struct drm_device *dev) +nouveau_display_repin(struct 
drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; @@ -441,10 +441,12 @@ nouveau_display_resume(struct drm_device *dev) if (ret) NV_ERROR(drm, "Could not pin/map cursor.\n"); } +} - nouveau_fbcon_set_suspend(dev, 0); - nouveau_fbcon_zfill_all(dev); - +void +nouveau_display_resume(struct drm_device *dev) +{ + struct drm_crtc *crtc; nouveau_display_init(dev); /* Force CLUT to get re-loaded during modeset */ diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 185e74132a6d..da84f1f40ec2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -57,6 +57,7 @@ void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); void nouveau_display_fini(struct drm_device *dev); int nouveau_display_suspend(struct drm_device *dev); +void nouveau_display_repin(struct drm_device *dev); void nouveau_display_resume(struct drm_device *dev); int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -71,6 +72,7 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); +int nouveau_crtc_set_config(struct drm_mode_set *set); #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT extern int nouveau_backlight_init(struct drm_device *); extern void nouveau_backlight_exit(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index b29d04b822ae..62c6118e94c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -25,7 +25,10 @@ #include #include #include - +#include +#include +#include "drmP.h" +#include "drm_crtc_helper.h" #include #include #include @@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); +MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); +int nouveau_runtime_pm = -1; +module_param_named(runpm, nouveau_runtime_pm, int, 0400); + static struct drm_driver driver; static int @@ -296,6 +303,31 @@ static int nouveau_drm_probe(struct pci_dev *pdev, return 0; } +#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 + +static void +nouveau_get_hdmi_dev(struct drm_device *dev) +{ + struct nouveau_drm *drm = dev->dev_private; + struct pci_dev *pdev = dev->pdev; + + /* subfunction one is a hdmi audio device? 
*/ + drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 1)); + + if (!drm->hdmi_device) { + DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1); + return; + } + + if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) { + DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class); + pci_dev_put(drm->hdmi_device); + drm->hdmi_device = NULL; + return; + } +} + static int nouveau_drm_load(struct drm_device *dev, unsigned long flags) { @@ -314,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) INIT_LIST_HEAD(&drm->clients); spin_lock_init(&drm->tile.lock); + nouveau_get_hdmi_dev(dev); + /* make sure AGP controller is in a consistent state before we * (possibly) execute vbios init tables (see nouveau_agp.h) */ @@ -388,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) nouveau_accel_init(drm); nouveau_fbcon_init(dev); + + if (nouveau_runtime_pm != 0) { + pm_runtime_use_autosuspend(dev->dev); + pm_runtime_set_autosuspend_delay(dev->dev, 5000); + pm_runtime_set_active(dev->dev); + pm_runtime_allow(dev->dev); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put(dev->dev); + } return 0; fail_dispinit: @@ -409,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); + pm_runtime_get_sync(dev->dev); nouveau_fbcon_fini(dev); nouveau_accel_fini(drm); @@ -424,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_agp_fini(drm); nouveau_vga_fini(drm); + if (drm->hdmi_device) + pci_dev_put(drm->hdmi_device); nouveau_cli_destroy(&drm->client); return 0; } @@ -450,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev) int ret; if (dev->mode_config.num_crtc) { - NV_INFO(drm, "suspending fbcon...\n"); - nouveau_fbcon_set_suspend(dev, 1); - - NV_INFO(drm, "suspending display...\n"); + NV_SUSPEND(drm, "suspending display...\n"); ret = nouveau_display_suspend(dev); if (ret) return ret; } - NV_INFO(drm, "evicting buffers...\n"); + NV_SUSPEND(drm, "evicting buffers...\n"); ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); - NV_INFO(drm, "waiting for kernel channels to go idle...\n"); + NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n"); if (drm->cechan) { ret = nouveau_channel_idle(drm->cechan); if (ret) @@ -475,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev) return ret; } - NV_INFO(drm, "suspending client object trees...\n"); + NV_SUSPEND(drm, "suspending client object trees...\n"); if (drm->fence && nouveau_fence(drm)->suspend) { if (!nouveau_fence(drm)->suspend(drm)) return -ENOMEM; @@ -487,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev) goto fail_client; } - NV_INFO(drm, "suspending kernel object tree...\n"); + NV_SUSPEND(drm, "suspending kernel object tree...\n"); ret = nouveau_client_fini(&drm->client.base, true); if (ret) goto fail_client; @@ -501,7 +544,7 @@ fail_client: } if (dev->mode_config.num_crtc) { - NV_INFO(drm, "resuming display...\n"); + NV_SUSPEND(drm, "resuming display...\n"); nouveau_display_resume(dev); } return ret; @@ -513,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF || + drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) return 0; + if (drm_dev->mode_config.num_crtc) + nouveau_fbcon_set_suspend(drm_dev, 1); + + 
nv_suspend_set_printk_level(NV_DBG_INFO); ret = nouveau_do_suspend(drm_dev); if (ret) return ret; @@ -523,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); + nv_suspend_set_printk_level(NV_DBG_DEBUG); return 0; } @@ -533,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; - NV_INFO(drm, "re-enabling device...\n"); + NV_SUSPEND(drm, "re-enabling device...\n"); nouveau_agp_reset(drm); - NV_INFO(drm, "resuming kernel object tree...\n"); + NV_SUSPEND(drm, "resuming kernel object tree...\n"); nouveau_client_init(&drm->client.base); nouveau_agp_init(drm); - NV_INFO(drm, "resuming client object trees...\n"); + NV_SUSPEND(drm, "resuming client object trees...\n"); if (drm->fence && nouveau_fence(drm)->resume) nouveau_fence(drm)->resume(drm); @@ -553,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev) nouveau_pm_resume(dev); if (dev->mode_config.num_crtc) { - NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + NV_SUSPEND(drm, "resuming display...\n"); + nouveau_display_repin(dev); } + return 0; } @@ -565,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF || + drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) return 0; pci_set_power_state(pdev, PCI_D0); @@ -575,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev) return ret; pci_set_master(pdev); - return nouveau_do_resume(drm_dev); + nv_suspend_set_printk_level(NV_DBG_INFO); + ret = nouveau_do_resume(drm_dev); + if (ret) { + nv_suspend_set_printk_level(NV_DBG_DEBUG); + return ret; + } + if (drm_dev->mode_config.num_crtc) + nouveau_fbcon_set_suspend(drm_dev, 0); + + nouveau_fbcon_zfill_all(drm_dev); + nouveau_display_resume(drm_dev); + nv_suspend_set_printk_level(NV_DBG_DEBUG); + return 0; } static int nouveau_pmops_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + int ret; + + nv_suspend_set_printk_level(NV_DBG_INFO); + if (drm_dev->mode_config.num_crtc) + nouveau_fbcon_set_suspend(drm_dev, 1); - return nouveau_do_suspend(drm_dev); + ret = nouveau_do_suspend(drm_dev); + nv_suspend_set_printk_level(NV_DBG_DEBUG); + return ret; } static int nouveau_pmops_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + int ret; - return nouveau_do_resume(drm_dev); + nv_suspend_set_printk_level(NV_DBG_INFO); + ret = nouveau_do_resume(drm_dev); + if (ret) { + nv_suspend_set_printk_level(NV_DBG_DEBUG); + return ret; + } + if (drm_dev->mode_config.num_crtc) + nouveau_fbcon_set_suspend(drm_dev, 0); + nouveau_fbcon_zfill_all(drm_dev); + nouveau_display_resume(drm_dev); + nv_suspend_set_printk_level(NV_DBG_DEBUG); + return 0; } @@ -604,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) char name[32], tmpname[TASK_COMM_LEN]; int ret; + /* need to bring up power immediately if opening device */ + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) + return ret; + get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); if (ret) - return ret; + goto out_suspend; if (nv_device(drm->device)->card_type >= NV_50) { 
ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 0x1000, &cli->base.vm); if (ret) { nouveau_cli_destroy(cli); - return ret; + goto out_suspend; } } @@ -625,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) mutex_lock(&drm->client.mutex); list_add(&cli->head, &drm->clients); mutex_unlock(&drm->client.mutex); - return 0; + +out_suspend: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return ret; } static void @@ -634,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv) struct nouveau_cli *cli = nouveau_cli(fpriv); struct nouveau_drm *drm = nouveau_drm(dev); + pm_runtime_get_sync(dev->dev); + if (cli->abi16) nouveau_abi16_fini(cli->abi16); mutex_lock(&drm->client.mutex); list_del(&cli->head); mutex_unlock(&drm->client.mutex); + } static void @@ -647,6 +742,8 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_cli *cli = nouveau_cli(fpriv); nouveau_cli_destroy(cli); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); } static const struct drm_ioctl_desc @@ -665,12 +762,30 @@ nouveau_ioctls[] = { DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), }; +long nouveau_drm_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev; + long ret; + dev = file_priv->minor->dev; + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) + return ret; + + ret = drm_ioctl(filp, cmd, arg); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return ret; +} static const struct file_operations nouveau_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = nouveau_drm_ioctl, .mmap = nouveau_ttm_mmap, .poll = drm_poll, .read = drm_read, @@ -753,6 +868,90 @@ nouveau_drm_pci_table[] = { {} }; +static int nouveau_pmops_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int ret; + + if (nouveau_runtime_pm == 0) + return -EINVAL; + + drm_kms_helper_poll_disable(drm_dev); + vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); + nouveau_switcheroo_optimus_dsm(); + ret = nouveau_do_suspend(drm_dev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); + drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + return ret; +} + +static int nouveau_pmops_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct nouveau_device *device = nouveau_dev(drm_dev); + int ret; + + if (nouveau_runtime_pm == 0) + return -EINVAL; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = nouveau_do_resume(drm_dev); + nouveau_display_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ + nv_mask(device, 0x88488, (1 << 25), (1 << 25)); + vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + return ret; +} + +static int nouveau_pmops_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(drm_dev); + struct drm_crtc *crtc; + + if 
(nouveau_runtime_pm == 0) + return -EBUSY; + + /* are we optimus enabled? */ + if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { + DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); + return -EBUSY; + } + + /* if we have a hdmi audio device - make sure it has a driver loaded */ + if (drm->hdmi_device) { + if (!drm->hdmi_device->driver) { + DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n"); + pm_runtime_mark_last_busy(dev); + return -EBUSY; + } + } + + list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) { + if (crtc->enabled) { + DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + return -EBUSY; + } + } + pm_runtime_mark_last_busy(dev); + pm_runtime_autosuspend(dev); + /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ + return 1; +} + static const struct dev_pm_ops nouveau_pm_ops = { .suspend = nouveau_pmops_suspend, .resume = nouveau_pmops_resume, @@ -760,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = { .thaw = nouveau_pmops_thaw, .poweroff = nouveau_pmops_freeze, .restore = nouveau_pmops_resume, + .runtime_suspend = nouveau_pmops_runtime_suspend, + .runtime_resume = nouveau_pmops_runtime_resume, + .runtime_idle = nouveau_pmops_runtime_idle, }; static struct pci_driver diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 41ff7e0d403a..994fd6ec373b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv) return fpriv ? fpriv->driver_priv : NULL; } +extern int nouveau_runtime_pm; + struct nouveau_drm { struct nouveau_cli client; struct drm_device *dev; @@ -129,6 +131,12 @@ struct nouveau_drm { /* power management */ struct nouveau_pm *pm; + + /* display power reference */ + bool have_disp_power_ref; + + struct dev_pm_domain vga_pm_domain; + struct pci_dev *hdmi_device; }; static inline struct nouveau_drm * @@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev) int nouveau_pmops_suspend(struct device *); int nouveau_pmops_resume(struct device *); +#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args) #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) #define NV_WARN(cli, fmt, args...) 
nv_warn((cli), fmt, ##args) diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 40a09f11a600..81638d7f2eff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, { struct drm_device *dev = pci_get_drvdata(pdev); + if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF) + return; + if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -78,8 +81,17 @@ void nouveau_vga_init(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; + bool runtime = false; vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); - vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, false); + + if (nouveau_runtime_pm == 1) + runtime = true; + if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) + runtime = true; + vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); + + if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) + vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain); } void diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 8b40a36c1b57..9d2092a5ed38 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1326,7 +1326,7 @@ static const struct drm_crtc_funcs nv50_crtc_func = { .cursor_set = nv50_crtc_cursor_set, .cursor_move = nv50_crtc_cursor_move, .gamma_set = nv50_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, + .set_config = nouveau_crtc_set_config, .destroy = nv50_crtc_destroy, .page_flip = nouveau_crtc_page_flip, }; -- cgit v1.2.3 From d4e4a31da334224d686d07983517831eab999798 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:52 +0100 Subject: drm: Don't export drm_find_cea_extension() any more MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function is only used inside drm_edid.c. Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 5 ++--- include/drm/drm_crtc.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfc7a1ba9360..e01478515820 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2298,10 +2298,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define EDID_CEA_YCRCB422 (1 << 4) #define EDID_CEA_VCDB_QS (1 << 6) -/** +/* * Search EDID for CEA extension block. 
*/ -u8 *drm_find_cea_extension(struct edid *edid) +static u8 *drm_find_cea_extension(struct edid *edid) { u8 *edid_ext = NULL; int i; @@ -2322,7 +2322,6 @@ u8 *drm_find_cea_extension(struct edid *edid) return edid_ext; } -EXPORT_SYMBOL(drm_find_cea_extension); /* * Calculate the alternate clock for the CEA mode diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 0a9f73e8be26..7987eff5dab2 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1032,7 +1032,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern u8 *drm_find_cea_extension(struct edid *edid); extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid); -- cgit v1.2.3 From 13ac3f5593cf0964cdb239864829e57cc6981dac Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:53 +0100 Subject: drm/edid: Fix add_cea_modes() style issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A few styles issues have crept in here, fix them before touching this code again. v2: constify arguments that can be (Ville Syrjälä) v3: constify, but better (Ville Syrjälä) Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e01478515820..bb25ee2f9f25 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2441,10 +2441,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) } static int -do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) +do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) { struct drm_device *dev = connector->dev; - u8 * mode, cea_mode; + const u8 *mode; + u8 cea_mode; int modes = 0; for (mode = db; mode < db + len; mode++) { @@ -2501,8 +2502,9 @@ cea_db_offsets(const u8 *cea, int *start, int *end) static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { - u8 * cea = drm_find_cea_extension(edid); - u8 * db, dbl; + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db; + u8 dbl; int modes = 0; if (cea && cea_revision(cea) >= 3) { @@ -2516,7 +2518,7 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) dbl = cea_db_payload_len(db); if (cea_db_tag(db) == VIDEO_BLOCK) - modes += do_cea_modes (connector, db+1, dbl); + modes += do_cea_modes(connector, db + 1, dbl); } } -- cgit v1.2.3 From 7ebe1963a063daf30f95752c35244c5d49550aa9 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:54 +0100 Subject: drm/edid: Parse the HDMI CEA block and look for 4k modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HDMI 1.4 adds 4 "4k x 2k" modes in the the CEA vendor specific block. With this commit, we now parse this block and expose the 4k modes that we find there. 
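As a rough standalone illustration of the lookup this patch performs (not part of the patch itself; hdmi_vic_name() is a hypothetical helper), an HDMI VIC read from the vendor specific block is simply a 1-based index into a small table of 4k modes, mirroring the edid_4k_modes[] array added below:

#include <stdio.h>

static const char * const hdmi_4k_mode_names[] = {
	"3840x2160@30Hz",		/* HDMI VIC 1 */
	"3840x2160@25Hz",		/* HDMI VIC 2 */
	"3840x2160@24Hz",		/* HDMI VIC 3 */
	"4096x2160@24Hz (SMPTE)",	/* HDMI VIC 4 */
};

/* hypothetical helper: map an HDMI VIC to a human readable mode name */
static const char *hdmi_vic_name(unsigned int vic)
{
	if (vic < 1 ||
	    vic > sizeof(hdmi_4k_mode_names) / sizeof(hdmi_4k_mode_names[0]))
		return "unknown HDMI VIC";
	return hdmi_4k_mode_names[vic - 1];	/* VICs start at 1 */
}

int main(void)
{
	printf("VIC 1 -> %s\n", hdmi_vic_name(1));
	printf("VIC 4 -> %s\n", hdmi_vic_name(4));
	printf("VIC 9 -> %s\n", hdmi_vic_name(9));
	return 0;
}

The actual timings (pixel clock, blanking) come from the edid_4k_modes[] table this patch adds to drm_edid.c; the sketch only shows the indexing rule.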
v2: Fix the "4096x2160" string (nice catch!), add comments about do_hdmi_vsdb_modes() arguments and make it clearer that offset is relative to the end of the required fields of the HDMI VSDB (Ville Syrjälä) v3: Fix 'Unknow' typo (Simon Farnsworth) Signed-off-by: Damien Lespiau Tested-by: Cancan Feng Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67030 Reviewed-by: Simon Farnsworth Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 124 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 109 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index bb25ee2f9f25..9de573cd3683 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -931,6 +931,36 @@ static const struct drm_display_mode edid_cea_modes[] = { .vrefresh = 100, }, }; +/* + * HDMI 1.4 4k modes. + */ +static const struct drm_display_mode edid_4k_modes[] = { + /* 1 - 3840x2160@30Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 4016, 4104, 4400, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, }, + /* 2 - 3840x2160@25Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 4896, 4984, 5280, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, }, + /* 3 - 3840x2160@24Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 5116, 5204, 5500, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, }, + /* 4 - 4096x2160@24Hz (SMPTE) */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, + 4096, 5116, 5204, 5500, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, }, +}; + /*** DDC fetch and block validation ***/ static const u8 edid_header[] = { @@ -2465,6 +2495,68 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) return modes; } +/* + * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * @len: length of the CEA block payload, ie. one can access up to db[len] + * + * Parses the HDMI VSDB looking for modes to add to @connector. 
+ */ +static int +do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len) +{ + struct drm_device *dev = connector->dev; + int modes = 0, offset = 0, i; + u8 vic_len; + + if (len < 8) + goto out; + + /* no HDMI_Video_Present */ + if (!(db[8] & (1 << 5))) + goto out; + + /* Latency_Fields_Present */ + if (db[8] & (1 << 7)) + offset += 2; + + /* I_Latency_Fields_Present */ + if (db[8] & (1 << 6)) + offset += 2; + + /* the declared length is not long enough for the 2 first bytes + * of additional video format capabilities */ + offset += 2; + if (len < (8 + offset)) + goto out; + + vic_len = db[8 + offset] >> 5; + + for (i = 0; i < vic_len && len >= (9 + offset + i); i++) { + struct drm_display_mode *newmode; + u8 vic; + + vic = db[9 + offset + i]; + + vic--; /* VICs start at 1 */ + if (vic >= ARRAY_SIZE(edid_4k_modes)) { + DRM_ERROR("Unknown HDMI VIC: %d\n", vic); + continue; + } + + newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]); + if (!newmode) + continue; + + drm_mode_probed_add(connector, newmode); + modes++; + } + +out: + return modes; +} + static int cea_db_payload_len(const u8 *db) { @@ -2496,6 +2588,21 @@ cea_db_offsets(const u8 *cea, int *start, int *end) return 0; } +static bool cea_db_is_hdmi_vsdb(const u8 *db) +{ + int hdmi_id; + + if (cea_db_tag(db) != VENDOR_BLOCK) + return false; + + if (cea_db_payload_len(db) < 5) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IDENTIFIER; +} + #define for_each_cea_db(cea, i, start, end) \ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) @@ -2519,6 +2626,8 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) if (cea_db_tag(db) == VIDEO_BLOCK) modes += do_cea_modes(connector, db + 1, dbl); + else if (cea_db_is_hdmi_vsdb(db)) + modes += do_hdmi_vsdb_modes(connector, db, dbl); } } @@ -2571,21 +2680,6 @@ monitor_name(struct detailed_timing *t, void *data) *(u8 **)data = t->data.other_data.data.str.str; } -static bool cea_db_is_hdmi_vsdb(const u8 *db) -{ - int hdmi_id; - - if (cea_db_tag(db) != VENDOR_BLOCK) - return false; - - if (cea_db_payload_len(db) < 5) - return false; - - hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); - - return hdmi_id == HDMI_IDENTIFIER; -} - /** * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink -- cgit v1.2.3 From 3f2f653378112c1453c0d83c81746a9225e4bc75 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:55 +0100 Subject: drm: Add support for alternate clocks of 4k modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Fix hmdi typo (Simon Farnsworth, Ville Syrjälä) Suggested-by: Ville Syrjälä Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Simon Farnsworth Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 68 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9de573cd3683..2381abd452f1 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2409,6 +2409,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) } EXPORT_SYMBOL(drm_match_cea_mode); +/* + * Calculate the alternate clock for HDMI modes (those from the HDMI vendor + * specific block). 
+ * + * It's almost like cea_mode_alternate_clock(), we just need to add an + * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this + * one. + */ +static unsigned int +hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode) +{ + if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160) + return hdmi_mode->clock; + + return cea_mode_alternate_clock(hdmi_mode); +} + +/* + * drm_match_hdmi_mode - look for a HDMI mode matching given mode + * @to_match: display mode + * + * An HDMI mode is one defined in the HDMI vendor specific block. + * + * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one. + */ +static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) +{ + u8 mode; + + if (!to_match->clock) + return 0; + + for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { + const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; + unsigned int clock1, clock2; + + /* Make sure to also match alternate clocks */ + clock1 = hdmi_mode->clock; + clock2 = hdmi_mode_alternate_clock(hdmi_mode); + + if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || + KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && + drm_mode_equal_no_clocks(to_match, hdmi_mode)) + return mode + 1; + } + return 0; +} + static int add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) { @@ -2426,18 +2474,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) * with the alternate clock for certain CEA modes. */ list_for_each_entry(mode, &connector->probed_modes, head) { - const struct drm_display_mode *cea_mode; + const struct drm_display_mode *cea_mode = NULL; struct drm_display_mode *newmode; - u8 cea_mode_idx = drm_match_cea_mode(mode) - 1; + u8 mode_idx = drm_match_cea_mode(mode) - 1; unsigned int clock1, clock2; - if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes)) - continue; + if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { + cea_mode = &edid_cea_modes[mode_idx]; + clock2 = cea_mode_alternate_clock(cea_mode); + } else { + mode_idx = drm_match_hdmi_mode(mode) - 1; + if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { + cea_mode = &edid_4k_modes[mode_idx]; + clock2 = hdmi_mode_alternate_clock(cea_mode); + } + } - cea_mode = &edid_cea_modes[cea_mode_idx]; + if (!cea_mode) + continue; clock1 = cea_mode->clock; - clock2 = cea_mode_alternate_clock(cea_mode); if (clock1 == clock2) continue; -- cgit v1.2.3 From a5ad3dcf358475dfc5ccf11e28d3822fc3c8e5fe Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:56 +0100 Subject: video/hdmi: Don't let the user of this API create invalid infoframes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To set the active aspect ratio value in the AVI infoframe today, you not only have to set the active_aspect field, but also the active_info_valid bit. Out of the 1 user of this API, we had 100% misuse, forgetting the _valid bit. This was fixed in: Author: Damien Lespiau Date: Tue Aug 6 20:32:17 2013 +0100 drm: Don't generate invalid AVI infoframes for CEA modes We can do better and derive the _valid bit from the user wanting to set the active aspect ratio. 
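The change can be pictured with a small standalone sketch (not the kernel implementation; avi_data_byte1() is a hypothetical name): bit 4 of AVI data byte 1, the "active format information present" flag, is now derived from whether the caller supplied an active aspect ratio, rather than from a separate active_info_valid field that was easy to forget:

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper mirroring the packing rule adopted by this patch */
static uint8_t avi_data_byte1(uint8_t colorspace, uint8_t scan_mode,
			      uint8_t active_aspect)
{
	uint8_t byte = ((colorspace & 0x3) << 5) | (scan_mode & 0x3);

	/* bit 4 follows active_aspect, no separate valid flag needed */
	if (active_aspect & 0xf)
		byte |= 1 << 4;

	return byte;
}

int main(void)
{
	/* 0x08 is the CEA-861 "same as picture" active aspect code */
	printf("no active aspect:   0x%02x\n", avi_data_byte1(0, 0, 0));
	printf("with active aspect: 0x%02x\n", avi_data_byte1(0, 0, 0x08));
	return 0;
}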
v2: Fix multi-lines comment style (Thierry Reding) Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 1 - drivers/video/hdmi.c | 6 +++++- include/linux/hdmi.h | 1 - 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2381abd452f1..d76d6089106f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3259,7 +3259,6 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, frame->video_code = drm_match_cea_mode(mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; - frame->active_info_valid = 1; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; return 0; diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 635d5690dd5a..7ccc118fefed 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -96,7 +96,11 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3); - if (frame->active_info_valid) + /* + * Data byte 1, bit 4 has to be set if we provide the active format + * aspect ratio + */ + if (frame->active_aspect & 0xf) ptr[0] |= BIT(4); if (frame->horizontal_bar_valid) diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index bc6743e76e37..931474c60b71 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -109,7 +109,6 @@ struct hdmi_avi_infoframe { unsigned char version; unsigned char length; enum hdmi_colorspace colorspace; - bool active_info_valid; bool horizontal_bar_valid; bool vertical_bar_valid; enum hdmi_scan_mode scan_mode; -- cgit v1.2.3 From 974e0701c5251de879624d166890fbd0ee9fc429 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:57 +0100 Subject: video/hdmi: Derive the bar data valid bit from the bar data fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just like: Author: Damien Lespiau Date: Mon Aug 12 11:53:24 2013 +0100 video/hdmi: Don't let the user of this API create invalid infoframes But this time for the horizontal/vertical bar data present bits. 
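The same idea, sketched standalone for the bar data case (avi_bar_bits() is a hypothetical helper, not driver code): bits 3 and 2 of data byte 1 are set whenever any bar value is non-zero, so separate horizontal/vertical "valid" flags are no longer needed:

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: derive the bar info present bits from bar values */
static uint8_t avi_bar_bits(uint16_t top, uint16_t bottom,
			    uint16_t left, uint16_t right)
{
	uint8_t bits = 0;

	if (top || bottom)	/* horizontal bar data present */
		bits |= 1 << 3;
	if (left || right)	/* vertical bar data present */
		bits |= 1 << 2;

	return bits;
}

int main(void)
{
	printf("letterboxed: 0x%02x\n", avi_bar_bits(60, 60, 0, 0));
	printf("no bars:     0x%02x\n", avi_bar_bits(0, 0, 0, 0));
	return 0;
}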
Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/video/hdmi.c | 5 +++-- include/linux/hdmi.h | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 7ccc118fefed..1201357f3e3c 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -103,10 +103,11 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, if (frame->active_aspect & 0xf) ptr[0] |= BIT(4); - if (frame->horizontal_bar_valid) + /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */ + if (frame->top_bar || frame->bottom_bar) ptr[0] |= BIT(3); - if (frame->vertical_bar_valid) + if (frame->left_bar || frame->right_bar) ptr[0] |= BIT(2); ptr[1] = ((frame->colorimetry & 0x3) << 6) | diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 931474c60b71..b98340b82e05 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -109,8 +109,6 @@ struct hdmi_avi_infoframe { unsigned char version; unsigned char length; enum hdmi_colorspace colorspace; - bool horizontal_bar_valid; - bool vertical_bar_valid; enum hdmi_scan_mode scan_mode; enum hdmi_colorimetry colorimetry; enum hdmi_picture_aspect picture_aspect; -- cgit v1.2.3 From 7d27becb3532d881378846e72864031977be511a Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:58 +0100 Subject: video/hdmi: Introduce helpers for the HDMI vendor specific infoframe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide the same programming model than the other infoframe types. The generic _pack() function can't handle those yet as we need to move the vendor OUI in the generic hdmi_vendor_infoframe structure to know which kind of vendor infoframe we are dealing with. v2: Fix the value of Side-by-side (half), hmdi typo, pack 3D_Ext_Data (Ville Syrjälä) v3: Future proof the sending of 3D_Ext_Data (Ville Syrjälä), Fix multi-lines comment style (Thierry Reding) Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/video/hdmi.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/hdmi.h | 26 +++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 1201357f3e3c..4c42bcb86535 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -287,6 +287,96 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, } EXPORT_SYMBOL(hdmi_audio_infoframe_pack); +/** + * hdmi_hdmi_infoframe_init() - initialize an HDMI vendor infoframe + * @frame: HDMI vendor infoframe + * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame) +{ + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_VENDOR; + frame->version = 1; + + /* + * 0 is a valid value for s3d_struct, so we use a special "not set" + * value + */ + frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID; + + return 0; +} +EXPORT_SYMBOL(hdmi_hdmi_infoframe_init); + +/** + * hdmi_hdmi_infoframe_pack() - write a HDMI vendor infoframe to binary buffer + * @frame: HDMI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. 
Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, + void *buffer, size_t size) +{ + u8 *ptr = buffer; + size_t length; + + /* empty info frame */ + if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID) + return -EINVAL; + + /* only one of those can be supplied */ + if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return -EINVAL; + + /* for side by side (half) we also need to provide 3D_Ext_Data */ + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + frame->length = 6; + else + frame->length = 5; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* HDMI OUI */ + ptr[4] = 0x03; + ptr[5] = 0x0c; + ptr[6] = 0x00; + + if (frame->vic) { + ptr[7] = 0x1 << 5; /* video format */ + ptr[8] = frame->vic; + } else { + ptr[7] = 0x2 << 5; /* video format */ + ptr[8] = (frame->s3d_struct & 0xf) << 4; + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + ptr[9] = (frame->s3d_ext_data & 0xf) << 4; + } + + hdmi_infoframe_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_hdmi_infoframe_pack); + /** * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary * buffer diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index b98340b82e05..e733252c2b5d 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -234,11 +234,37 @@ struct hdmi_vendor_infoframe { ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); +enum hdmi_3d_structure { + HDMI_3D_STRUCTURE_INVALID = -1, + HDMI_3D_STRUCTURE_FRAME_PACKING = 0, + HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE, + HDMI_3D_STRUCTURE_LINE_ALTERNATIVE, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL, + HDMI_3D_STRUCTURE_L_DEPTH, + HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH, + HDMI_3D_STRUCTURE_TOP_AND_BOTTOM, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, +}; + +struct hdmi_hdmi_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + u8 vic; + enum hdmi_3d_structure s3d_struct; + unsigned int s3d_ext_data; +}; + +int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame); +ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, + void *buffer, size_t size); + union hdmi_infoframe { struct hdmi_any_infoframe any; struct hdmi_avi_infoframe avi; struct hdmi_spd_infoframe spd; struct hdmi_vendor_infoframe vendor; + struct hdmi_hdmi_infoframe hdmi; struct hdmi_audio_infoframe audio; }; -- cgit v1.2.3 From a26a58e89a4e5f2ab006b6ea2b8f3c9a97ae2e77 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:58:59 +0100 Subject: gpu: host1x: Port the HDMI vendor infoframe code the common helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I just wrote the bits to define and pack HDMI vendor specific infoframe. Port the host1x driver to use those so I can refactor the infoframe code a bit more. This changes the length of the infoframe payload from 6 to 5, which is enough for the "frame packing" stereo format. 
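The length arithmetic behind that statement, as a standalone sketch under the assumptions spelled out in the packing code above (vendor_infoframe_len() is a hypothetical name): the payload is 3 bytes of IEEE OUI, one HDMI_Video_Format byte and one VIC/3D_Structure byte, with a sixth 3D_Ext_Data byte only for formats such as side-by-side (half); the 4-byte infoframe header comes on top:

#include <stdbool.h>
#include <stdio.h>

#define INFOFRAME_HEADER_SIZE 4	/* type, version, length, checksum */

/* hypothetical helper computing HDMI vendor infoframe sizes */
static int vendor_infoframe_len(bool needs_3d_ext_data)
{
	int payload = 3 /* OUI */ + 1 /* video format */ + 1 /* VIC or 3D_Structure */;

	if (needs_3d_ext_data)	/* e.g. side-by-side (half) */
		payload += 1;

	return INFOFRAME_HEADER_SIZE + payload;
}

int main(void)
{
	printf("frame packing:       %d bytes\n", vendor_infoframe_len(false));
	printf("side-by-side (half): %d bytes\n", vendor_infoframe_len(true));
	return 0;
}

This is why a 10-byte buffer, as used in the Tegra code below, is enough for either variant.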
v2: Pimp up the commit message with the note about the length (Ville Syrjälä) Cc: Thierry Reding Cc: Terje Bergström Cc: linux-tegra@vger.kernel.org Signed-off-by: Damien Lespiau Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/host1x/drm/hdmi.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c index 01097da09f7f..b5489187a163 100644 --- a/drivers/gpu/host1x/drm/hdmi.c +++ b/drivers/gpu/host1x/drm/hdmi.c @@ -539,7 +539,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) { - struct hdmi_vendor_infoframe frame; + struct hdmi_hdmi_infoframe frame; unsigned long value; u8 buffer[10]; ssize_t err; @@ -551,26 +551,10 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) return; } - memset(&frame, 0, sizeof(frame)); + hdmi_hdmi_infoframe_init(&frame); + frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING; - frame.type = HDMI_INFOFRAME_TYPE_VENDOR; - frame.version = 0x01; - frame.length = 6; - - frame.data[0] = 0x03; /* regid0 */ - frame.data[1] = 0x0c; /* regid1 */ - frame.data[2] = 0x00; /* regid2 */ - frame.data[3] = 0x02 << 5; /* video format */ - - /* TODO: 74 MHz limit? */ - if (1) { - frame.data[4] = 0x00 << 4; /* 3D structure */ - } else { - frame.data[4] = 0x08 << 4; /* 3D structure */ - frame.data[5] = 0x00 << 4; /* 3D ext. data */ - } - - err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); + err = hdmi_hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n", err); -- cgit v1.2.3 From c782d2e73d1e69c863d03945907bc7fbc879a778 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:00 +0100 Subject: drm/edid: Move HDMI_IDENTIFIER to hdmi.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We'll need the HDMI OUI for the HDMI vendor infoframe data, so let's move the DRM one to hdmi.h, might as well use the hdmi header to store some hdmi defines. (Note that, in fact, infoframes are part of the CEA-861 standard, and only the HDMI vendor specific infoframe is special to HDMI, but details..) 
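For reference, a standalone sketch of how that 24-bit OUI is laid out (oui_from_bytes() is a hypothetical helper, not kernel code): the identifier 0x000c03 is stored least significant byte first, which is why the packed vendor infoframe carries the bytes 0x03, 0x0c, 0x00 and why cea_db_is_hdmi_vsdb() reassembles them in that order:

#include <stdint.h>
#include <stdio.h>

#define HDMI_LLC_OUI 0x000c03	/* IEEE OUI of HDMI Licensing, LLC */

/* hypothetical helper mirroring the byte order used by cea_db_is_hdmi_vsdb() */
static uint32_t oui_from_bytes(const uint8_t *b)
{
	return b[0] | (b[1] << 8) | ((uint32_t)b[2] << 16);
}

int main(void)
{
	const uint8_t vsdb_oui[] = { 0x03, 0x0c, 0x00 };
	unsigned int oui = oui_from_bytes(vsdb_oui);

	printf("OUI = 0x%06x, HDMI VSDB: %s\n", oui,
	       oui == HDMI_LLC_OUI ? "yes" : "no");
	return 0;
}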
Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 1 - include/linux/hdmi.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index d76d6089106f..3aa653fd03a7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2317,7 +2317,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, return closure.modes; } -#define HDMI_IDENTIFIER 0x000C03 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index e733252c2b5d..37e0cd755284 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -18,6 +18,7 @@ enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_AUDIO = 0x84, }; +#define HDMI_IDENTIFIER 0x000c03 #define HDMI_INFOFRAME_HEADER_SIZE 4 #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 -- cgit v1.2.3 From af3e95b40720cdf301eb85387c0a3dc4067cc551 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:01 +0100 Subject: video/hdmi: Hook the HDMI vendor infoframe with the generic _pack() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With this last bit, hdmi_infoframe_pack() is now able to pack any infoframe we support. At the same time, because it's impractical to make two commits out of this, we get rid of the version that encourages the open coding of the vendor infoframe packing. We can do so because the only user of this API has been ported in: Author: Damien Lespiau Date: Mon Aug 12 18:08:37 2013 +0100 gpu: host1x: Port the HDMI vendor infoframe code the common helpers v2: Change oui to be an unsigned int (Ville Syrjälä) Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/video/hdmi.c | 46 ++++++++++------------------------------------ include/linux/hdmi.h | 24 ++++++++++++------------ 2 files changed, 22 insertions(+), 48 deletions(-) diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 4c42bcb86535..fbccb88c2620 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -300,6 +300,8 @@ int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame) frame->type = HDMI_INFOFRAME_TYPE_VENDOR; frame->version = 1; + frame->oui = HDMI_IDENTIFIER; + /* * 0 is a valid value for s3d_struct, so we use a special "not set" * value @@ -377,46 +379,18 @@ ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, } EXPORT_SYMBOL(hdmi_hdmi_infoframe_pack); -/** - * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary - * buffer - * @frame: HDMI vendor infoframe - * @buffer: destination buffer - * @size: size of buffer - * - * Packs the information contained in the @frame structure into a binary - * representation that can be written into the corresponding controller - * registers. Also computes the checksum as required by section 5.3.5 of - * the HDMI 1.4 specification. - * - * Returns the number of bytes packed into the binary buffer or a negative - * error code on failure. 
+/* + * hdmi_vendor_infoframe_pack() - write a vendor infoframe to binary buffer */ -ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, - void *buffer, size_t size) +static ssize_t hdmi_vendor_infoframe_pack(union hdmi_vendor_infoframe *frame, + void *buffer, size_t size) { - u8 *ptr = buffer; - size_t length; - - length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; - - if (size < length) - return -ENOSPC; - - memset(buffer, 0, size); - - ptr[0] = frame->type; - ptr[1] = frame->version; - ptr[2] = frame->length; - ptr[3] = 0; /* checksum */ - - memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length); - - hdmi_infoframe_checksum(buffer, length); + /* we only know about HDMI vendor infoframes */ + if (frame->any.oui != HDMI_IDENTIFIER) + return -EINVAL; - return length; + return hdmi_hdmi_infoframe_pack(&frame->hdmi, buffer, size); } -EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); /** * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 37e0cd755284..e24d850a8ee6 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -225,16 +225,6 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, void *buffer, size_t size); -struct hdmi_vendor_infoframe { - enum hdmi_infoframe_type type; - unsigned char version; - unsigned char length; - u8 data[27]; -}; - -ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, - void *buffer, size_t size); - enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, HDMI_3D_STRUCTURE_FRAME_PACKING = 0, @@ -251,6 +241,7 @@ struct hdmi_hdmi_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; + unsigned int oui; u8 vic; enum hdmi_3d_structure s3d_struct; unsigned int s3d_ext_data; @@ -260,12 +251,21 @@ int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame); ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, void *buffer, size_t size); +union hdmi_vendor_infoframe { + struct { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + } any; + struct hdmi_hdmi_infoframe hdmi; +}; + union hdmi_infoframe { struct hdmi_any_infoframe any; struct hdmi_avi_infoframe avi; struct hdmi_spd_infoframe spd; - struct hdmi_vendor_infoframe vendor; - struct hdmi_hdmi_infoframe hdmi; + union hdmi_vendor_infoframe vendor; struct hdmi_audio_infoframe audio; }; -- cgit v1.2.3 From ae84b900b009589a7017a1f8f060edd7de501642 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:02 +0100 Subject: video/hdmi: Use hdmi_vendor_infoframe for the HDMI specific infoframe We just got rid of the version of hdmi_vendor_infoframe that had a byte array for anyone to poke at. It's now time to shuffle around the naming of hdmi_hdmi_infoframe to make hdmi_vendor_infoframe become the HDMI vendor specific structure. 
Cc: Thierry Reding Signed-off-by: Damien Lespiau Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/host1x/drm/hdmi.c | 6 +++--- drivers/video/hdmi.c | 25 +++++++++++++------------ include/linux/hdmi.h | 15 ++++++++------- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c index b5489187a163..52e3c9641a0f 100644 --- a/drivers/gpu/host1x/drm/hdmi.c +++ b/drivers/gpu/host1x/drm/hdmi.c @@ -539,7 +539,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) { - struct hdmi_hdmi_infoframe frame; + struct hdmi_vendor_infoframe frame; unsigned long value; u8 buffer[10]; ssize_t err; @@ -551,10 +551,10 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) return; } - hdmi_hdmi_infoframe_init(&frame); + hdmi_vendor_infoframe_init(&frame); frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING; - err = hdmi_hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); + err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n", err); diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index fbccb88c2620..4f73167c8647 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -288,12 +288,12 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, EXPORT_SYMBOL(hdmi_audio_infoframe_pack); /** - * hdmi_hdmi_infoframe_init() - initialize an HDMI vendor infoframe + * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe * @frame: HDMI vendor infoframe * * Returns 0 on success or a negative error code on failure. */ -int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame) +int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) { memset(frame, 0, sizeof(*frame)); @@ -310,10 +310,10 @@ int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame) return 0; } -EXPORT_SYMBOL(hdmi_hdmi_infoframe_init); +EXPORT_SYMBOL(hdmi_vendor_infoframe_init); /** - * hdmi_hdmi_infoframe_pack() - write a HDMI vendor infoframe to binary buffer + * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer * @frame: HDMI infoframe * @buffer: destination buffer * @size: size of buffer @@ -326,7 +326,7 @@ EXPORT_SYMBOL(hdmi_hdmi_infoframe_init); * Returns the number of bytes packed into the binary buffer or a negative * error code on failure. 
*/ -ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, +ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size) { u8 *ptr = buffer; @@ -377,19 +377,20 @@ ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, return length; } -EXPORT_SYMBOL(hdmi_hdmi_infoframe_pack); +EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); /* - * hdmi_vendor_infoframe_pack() - write a vendor infoframe to binary buffer + * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer */ -static ssize_t hdmi_vendor_infoframe_pack(union hdmi_vendor_infoframe *frame, - void *buffer, size_t size) +static ssize_t +hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame, + void *buffer, size_t size) { /* we only know about HDMI vendor infoframes */ if (frame->any.oui != HDMI_IDENTIFIER) return -EINVAL; - return hdmi_hdmi_infoframe_pack(&frame->hdmi, buffer, size); + return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size); } /** @@ -422,8 +423,8 @@ hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size) length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size); break; case HDMI_INFOFRAME_TYPE_VENDOR: - length = hdmi_vendor_infoframe_pack(&frame->vendor, - buffer, size); + length = hdmi_vendor_any_infoframe_pack(&frame->vendor, + buffer, size); break; default: WARN(1, "Bad infoframe type %d\n", frame->any.type); diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index e24d850a8ee6..d4ae12c7931b 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -237,7 +237,8 @@ enum hdmi_3d_structure { HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, }; -struct hdmi_hdmi_infoframe { + +struct hdmi_vendor_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; @@ -247,25 +248,25 @@ struct hdmi_hdmi_infoframe { unsigned int s3d_ext_data; }; -int hdmi_hdmi_infoframe_init(struct hdmi_hdmi_infoframe *frame); -ssize_t hdmi_hdmi_infoframe_pack(struct hdmi_hdmi_infoframe *frame, - void *buffer, size_t size); +int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); +ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size); -union hdmi_vendor_infoframe { +union hdmi_vendor_any_infoframe { struct { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned int oui; } any; - struct hdmi_hdmi_infoframe hdmi; + struct hdmi_vendor_infoframe hdmi; }; union hdmi_infoframe { struct hdmi_any_infoframe any; struct hdmi_avi_infoframe avi; struct hdmi_spd_infoframe spd; - union hdmi_vendor_infoframe vendor; + union hdmi_vendor_any_infoframe vendor; struct hdmi_audio_infoframe audio; }; -- cgit v1.2.3 From 83dd000865eaaeb0799bf5e6d12f8d8cdb740e91 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:03 +0100 Subject: drm: Add a helper to forge HDMI vendor infoframes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can then be used by DRM drivers to setup their vendor infoframes. 
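A minimal kernel-style sketch of how a driver could combine the new helper with the packing code introduced earlier in this series; example_pack_hdmi_vendor_infoframe() is a hypothetical wrapper, not part of any driver, and the hardware-specific DIP write is deliberately left out:

#include <linux/types.h>
#include <linux/hdmi.h>
#include <drm/drm_edid.h>

/* hypothetical wrapper: fill and pack a vendor infoframe for @mode */
static ssize_t
example_pack_hdmi_vendor_infoframe(const struct drm_display_mode *mode,
				   u8 *buffer, size_t size)
{
	struct hdmi_vendor_infoframe frame;
	int ret;

	/* the helper returns -EINVAL when the mode needs no vendor infoframe,
	 * which a caller can safely treat as "nothing to send" */
	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;

	/* on success @buffer holds header + payload, checksum included,
	 * ready for the driver's DIP/infoframe write path */
	return hdmi_vendor_infoframe_pack(&frame, buffer, size);
}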
v2: Fix hmdi typo (Simon Farnsworth) v3: Adapt to the hdmi_vendor_infoframe rename Signed-off-by: Damien Lespiau Reviewed-by: Simon Farnsworth Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 36 ++++++++++++++++++++++++++++++++++++ include/drm/drm_edid.h | 4 ++++ 2 files changed, 40 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3aa653fd03a7..ed3505fe8ace 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3263,3 +3263,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, return 0; } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); + +/** + * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with + * data from a DRM display mode + * @frame: HDMI vendor infoframe + * @mode: DRM display mode + * + * Note that there's is a need to send HDMI vendor infoframes only when using a + * 4k or stereoscopic 3D mode. So when giving any other mode as input this + * function will return -EINVAL, error that can be safely ignored. + * + * Returns 0 on success or a negative error code on failure. + */ +int +drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + const struct drm_display_mode *mode) +{ + int err; + u8 vic; + + if (!frame || !mode) + return -EINVAL; + + vic = drm_match_hdmi_mode(mode); + if (!vic) + return -EINVAL; + + err = hdmi_vendor_infoframe_init(frame); + if (err < 0) + return err; + + frame->vic = vic; + + return 0; +} +EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index fc481fc17085..7b75621fda4c 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -256,6 +256,7 @@ struct drm_encoder; struct drm_connector; struct drm_display_mode; struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); @@ -268,5 +269,8 @@ int drm_load_edid_firmware(struct drm_connector *connector); int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode); +int +drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + const struct drm_display_mode *mode); #endif /* __DRM_EDID_H__ */ -- cgit v1.2.3 From c8bb75afff8eaed89476a00f733c666e1b44115b Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:04 +0100 Subject: drm/i915/hdmi: Write HDMI vendor specific infoframes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With all the common infoframe bits now in place, we can finally write the vendor specific infoframes in our driver. 
Signed-off-by: Damien Lespiau Reviewed-by: Ville Syrjälä Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_hdmi.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fab94be89dfa..88a2c0792f26 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4151,6 +4151,8 @@ _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) +#define HSW_TVIDEO_DIP_VS_DATA(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) #define HSW_TVIDEO_DIP_GCP(trans) \ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index dd4fa35e0a85..f27b91eeeb64 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -74,6 +74,8 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) return VIDEO_DIP_SELECT_AVI; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_SELECT_SPD; + case HDMI_INFOFRAME_TYPE_VENDOR: + return VIDEO_DIP_SELECT_VENDOR; default: DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; @@ -87,6 +89,8 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) return VIDEO_DIP_ENABLE_AVI; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD; + case HDMI_INFOFRAME_TYPE_VENDOR: + return VIDEO_DIP_ENABLE_VENDOR; default: DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; @@ -100,6 +104,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) return VIDEO_DIP_ENABLE_AVI_HSW; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD_HSW; + case HDMI_INFOFRAME_TYPE_VENDOR: + return VIDEO_DIP_ENABLE_VS_HSW; default: DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; @@ -114,6 +120,8 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); case HDMI_INFOFRAME_TYPE_SPD: return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); + case HDMI_INFOFRAME_TYPE_VENDOR: + return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); default: DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; @@ -392,6 +400,21 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) intel_write_infoframe(encoder, &frame); } +static void +intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + union hdmi_infoframe frame; + int ret; + + ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, + adjusted_mode); + if (ret < 0) + return; + + intel_write_infoframe(encoder, &frame); +} + static void g4x_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { @@ -454,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void ibx_set_infoframes(struct drm_encoder *encoder, @@ -515,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void cpt_set_infoframes(struct 
drm_encoder *encoder, @@ -550,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void vlv_set_infoframes(struct drm_encoder *encoder, @@ -584,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void hsw_set_infoframes(struct drm_encoder *encoder, @@ -611,6 +638,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void intel_hdmi_mode_set(struct intel_encoder *encoder) -- cgit v1.2.3 From 6cb3b7f1c013fd4bea41e16ee557bcb2f1561787 Mon Sep 17 00:00:00 2001 From: "Lespiau, Damien" Date: Mon, 19 Aug 2013 16:59:05 +0100 Subject: video/hdmi: Rename HDMI_IDENTIFIER to HDMI_IEEE_OUI HDMI_IDENTIFIER was felt too generic, rename it to what it is, the IEEE OUI corresponding to HDMI Licensing, LLC. http://standards.ieee.org/develop/regauth/oui/oui.txt Cc: Thierry Reding Signed-off-by: Damien Lespiau Reviewed-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 2 +- drivers/video/hdmi.c | 4 ++-- include/linux/hdmi.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ed3505fe8ace..a207cc3f2c57 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2655,7 +2655,7 @@ static bool cea_db_is_hdmi_vsdb(const u8 *db) hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); - return hdmi_id == HDMI_IDENTIFIER; + return hdmi_id == HDMI_IEEE_OUI; } #define for_each_cea_db(cea, i, start, end) \ diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 4f73167c8647..9e758a8f890d 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -300,7 +300,7 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) frame->type = HDMI_INFOFRAME_TYPE_VENDOR; frame->version = 1; - frame->oui = HDMI_IDENTIFIER; + frame->oui = HDMI_IEEE_OUI; /* * 0 is a valid value for s3d_struct, so we use a special "not set" @@ -387,7 +387,7 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame, void *buffer, size_t size) { /* we only know about HDMI vendor infoframes */ - if (frame->any.oui != HDMI_IDENTIFIER) + if (frame->any.oui != HDMI_IEEE_OUI) return -EINVAL; return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size); diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index d4ae12c7931b..9231be9e90a2 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -18,7 +18,7 @@ enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_AUDIO = 0x84, }; -#define HDMI_IDENTIFIER 0x000c03 +#define HDMI_IEEE_OUI 0x000c03 #define HDMI_INFOFRAME_HEADER_SIZE 4 #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 -- cgit v1.2.3 From 1793126fcebd7c18834f95d43b55e387a8803aa8 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sun, 25 Aug 2013 18:29:00 +0200 Subject: drm: implement experimental render nodes Render nodes provide an API for userspace to use non-privileged GPU commands without any running DRM-Master. It is useful for offscreen rendering, GPGPU clients, and normal render clients which do not perform modesetting. 
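As a hedged userspace illustration of that model (the node path is the conventional one for the first render node, not something mandated by this patch), a render client simply opens the render node and starts issuing render ioctls, with no DRM-Master authentication step:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* conventional path of the first render node; with this patch the
	 * node only exists when the experimental drm.rnodes=1 parameter is
	 * set and the driver advertises DRIVER_RENDER */
	const char *path = "/dev/dri/renderD128";
	int fd = open(path, O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror(path);
		return 1;
	}

	/* no drmAuth/SET_MASTER handshake: access is governed purely by the
	 * file system permissions on the node, and only ioctls marked
	 * DRM_RENDER_ALLOW (e.g. the PRIME fd <-> handle ioctls) are
	 * accepted on this file descriptor */
	close(fd);
	return 0;
}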
Compared to legacy clients, render clients no longer need any authentication to perform client ioctls. Instead, user-space controls render/client access to GPUs via filesystem access-modes on the render-node. Once a render-node was opened, a client has full access to the client/render operations on the GPU. However, no modesetting or ioctls that affect global state are allowed on render nodes. To prevent privilege-escalation, drivers must explicitly state that they support render nodes. They must mark their render-only ioctls as DRM_RENDER_ALLOW so render clients can use them. Furthermore, they must support clients without any attached master. If filesystem access-modes are not enough for fine-grained access control to render nodes (very unlikely, considering the versaitlity of FS-ACLs), you may still fall-back to fd-passing from server to client (which allows arbitrary access-control). However, note that revoking access is currently impossible and unlikely to get implemented. Note: Render clients no longer have any associated DRM-Master as they are supposed to be independent of any server state. DRM core highly depends on file_priv->master to be non-NULL for modesetting/ctx/etc. commands. Therefore, drivers must be very careful to not require DRM-Master if they support DRIVER_RENDER. So far render-nodes are protected by "drm_rnodes". As long as this module-parameter is not set to 1, a driver will not create render nodes. This allows us to experiment with the API a bit before we stabilize it. v2: drop insecure GEM_FLINK to force use of dmabuf Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- Documentation/DocBook/drm.tmpl | 69 ++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_drv.c | 13 ++++---- drivers/gpu/drm/drm_fops.c | 14 ++++----- drivers/gpu/drm/drm_pci.c | 9 ++++++ drivers/gpu/drm/drm_platform.c | 9 ++++++ drivers/gpu/drm/drm_stub.c | 10 ++++++ drivers/gpu/drm/drm_usb.c | 9 ++++++ include/drm/drmP.h | 9 ++++++ 8 files changed, 129 insertions(+), 13 deletions(-) diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 9fc8ed4ac0f4..ed1d6d289022 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -205,6 +205,12 @@ Driver implements DRM PRIME buffer sharing. + + DRIVER_RENDER + + Driver supports dedicated render nodes. + + @@ -2644,6 +2650,69 @@ int (*resume) (struct drm_device *); info, since man pages should cover the rest. + + + + Render nodes + + DRM core provides multiple character-devices for user-space to use. + Depending on which device is opened, user-space can perform a different + set of operations (mainly ioctls). The primary node is always created + and called card<num>. Additionally, a currently + unused control node, called controlD<num> is also + created. The primary node provides all legacy operations and + historically was the only interface used by userspace. With KMS, the + control node was introduced. However, the planned KMS control interface + has never been written and so the control node stays unused to date. + + + With the increased use of offscreen renderers and GPGPU applications, + clients no longer require running compositors or graphics servers to + make use of a GPU. But the DRM API required unprivileged clients to + authenticate to a DRM-Master prior to getting GPU access. To avoid this + step and to grant clients GPU access without authenticating, render + nodes were introduced. 
Render nodes solely serve render clients, that + is, no modesetting or privileged ioctls can be issued on render nodes. + Only non-global rendering commands are allowed. If a driver supports + render nodes, it must advertise it via the DRIVER_RENDER + DRM driver capability. If not supported, the primary node must be used + for render clients together with the legacy drmAuth authentication + procedure. + + + If a driver advertises render node support, DRM core will create a + separate render node called renderD<num>. There will + be one render node per device. No ioctls except PRIME-related ioctls + will be allowed on this node. Especially GEM_OPEN will be + explicitly prohibited. Render nodes are designed to avoid the + buffer-leaks, which occur if clients guess the flink names or mmap + offsets on the legacy interface. Additionally to this basic interface, + drivers must mark their driver-dependent render-only ioctls as + DRM_RENDER_ALLOW so render clients can use them. Driver + authors must be careful not to allow any privileged ioctls on render + nodes. + + + With render nodes, user-space can now control access to the render node + via basic file-system access-modes. A running graphics server which + authenticates clients on the privileged primary/legacy node is no longer + required. Instead, a client can open the render node and is immediately + granted GPU access. Communication between clients (or servers) is done + via PRIME. FLINK from render node to legacy node is not supported. New + clients must not use the insecure FLINK interface. + + + Besides dropping all modeset/global ioctls, render nodes also drop the + DRM-Master concept. There is no reason to associate render clients with + a DRM-Master as they are independent of any graphics server. Besides, + they must work without any running master, anyway. + Drivers must be able to run without a master object if they support + render nodes. If, on the other hand, a driver requires shared state + between clients which is visible to user-space and accessible beyond + open-file boundaries, they cannot support render nodes. 
+ + + diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 288da3dc2a09..e572dd20bdee 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -130,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), @@ -420,9 +420,10 @@ long drm_ioctl(struct file *filp, DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || - ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || + ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) || ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || - (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { + (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) || + (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) { retcode = -EACCES; } else { if (cmd & (IOC_IN | IOC_OUT)) { diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 136c949307ba..4be8e09a32ef 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -262,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp, goto out_prime_destroy; } - - /* if there is no current master make this fd it */ + /* if there is no current master make this fd it, but do not create + * any master object for render clients */ mutex_lock(&dev->struct_mutex); - if (!priv->minor->master) { + if (!priv->minor->master && !drm_is_render_client(priv)) { /* create a new master */ priv->minor->master = drm_master_create(priv->minor); if (!priv->minor->master) { @@ -303,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp, goto out_close; } } - mutex_unlock(&dev->struct_mutex); - } else { + } else if (!drm_is_render_client(priv)) { /* get a 
reference to the master */ priv->master = drm_master_get(priv->minor->master); - mutex_unlock(&dev->struct_mutex); } + mutex_unlock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex); list_add(&priv->lhead, &dev->filelist); @@ -478,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp) iput(container_of(dev->dev_mapping, struct inode, i_data)); /* drop the reference held my the file priv */ - drm_master_put(&file_priv->master); + if (file_priv->master) + drm_master_put(&file_priv->master); file_priv->is_master = 0; list_del(&file_priv->lhead); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 3fca2db1c40c..1f96cee6eee8 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -354,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, goto err_g2; } + if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { + ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); + if (ret) + goto err_g21; + } + if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) goto err_g3; @@ -383,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, err_g4: drm_put_minor(&dev->primary); err_g3: + if (dev->render) + drm_put_minor(&dev->render); +err_g21: if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); err_g2: diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 400024b6d512..f7a18c6ba4c4 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -69,6 +69,12 @@ static int drm_get_platform_dev(struct platform_device *platdev, goto err_g1; } + if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { + ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); + if (ret) + goto err_g11; + } + ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); if (ret) goto err_g2; @@ -100,6 +106,9 @@ static int drm_get_platform_dev(struct platform_device *platdev, err_g3: drm_put_minor(&dev->primary); err_g2: + if (dev->render) + drm_put_minor(&dev->render); +err_g11: if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); err_g1: diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index e30bb0d7c67a..e7eb0276f7f1 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -40,6 +40,9 @@ unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); +unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */ +EXPORT_SYMBOL(drm_rnodes); + unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ EXPORT_SYMBOL(drm_vblank_offdelay); @@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); +MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); +module_param_named(rnodes, drm_rnodes, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); @@ -446,6 +451,9 @@ void drm_put_dev(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); + if (dev->render) + drm_put_minor(&dev->render); + if (driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); @@ -462,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev) /* for a USB device */ if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_unplug_minor(dev->control); + if (dev->render) + drm_unplug_minor(dev->render); drm_unplug_minor(dev->primary); mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 34a156f0c336..87664723b9ce 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface, if (ret) goto err_g1; + if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { + ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); + if (ret) + goto err_g11; + } + ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); if (ret) goto err_g2; @@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface, err_g3: drm_put_minor(&dev->primary); err_g2: + if (dev->render) + drm_put_minor(&dev->render); +err_g11: drm_put_minor(&dev->control); err_g1: kfree(dev); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0e3d51793b65..290734191f72 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -145,6 +145,7 @@ int drm_err(const char *func, const char *format, ...); #define DRIVER_GEM 0x1000 #define DRIVER_MODESET 0x2000 #define DRIVER_PRIME 0x4000 +#define DRIVER_RENDER 0x8000 #define DRIVER_BUS_PCI 0x1 #define DRIVER_BUS_PLATFORM 0x2 @@ -290,6 +291,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_ROOT_ONLY 0x4 #define DRM_CONTROL_ALLOW 0x8 #define DRM_UNLOCKED 0x10 +#define DRM_RENDER_ALLOW 0x20 struct drm_ioctl_desc { unsigned int cmd; @@ -1204,6 +1206,7 @@ struct drm_device { unsigned int agp_buffer_token; struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ + struct drm_minor *render; /**< render node for card */ struct drm_mode_config mode_config; /**< Current mode config */ @@ -1251,6 +1254,11 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev) return mutex_is_locked(&dev->mode_config.mutex); } +static inline bool drm_is_render_client(struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_RENDER; +} + /******************************************************************/ /** \name Internal function definitions */ /*@{*/ @@ -1450,6 +1458,7 @@ extern void drm_put_dev(struct drm_device *dev); extern int drm_put_minor(struct drm_minor **minor); extern void drm_unplug_dev(struct drm_device *dev); extern unsigned int drm_debug; +extern unsigned int drm_rnodes; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; -- cgit v1.2.3 From 14bbf20c8839eb595753712e15b8786f9cdc5ed8 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 26 Aug 2013 15:15:37 +0800 Subject: drm/vmwgfx: fix error return code in 
vmw_driver_load() Fix to return -ENOMEM in the fence manager init error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Acked-by: Dmitry Torokhov Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0dcfa6b76c45..1a90f0a2f7e5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } dev_priv->fman = vmw_fence_manager_init(dev_priv); - if (unlikely(dev_priv->fman == NULL)) + if (unlikely(dev_priv->fman == NULL)) { + ret = -ENOMEM; goto out_no_fman; + } vmw_kms_save_vga(dev_priv); -- cgit v1.2.3 From 0adb23709ba9dd87d8bfa1ee349482ac8ec0730a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Aug 2013 23:46:02 +0300 Subject: drm/prime: double lock typo There is a typo so deadlocks on error instead of unlocking. Signed-off-by: Dan Carpenter Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7ae2bfcab70e..276d470f7b3e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -552,7 +552,7 @@ fail: */ drm_gem_handle_delete(file_priv, *handle); out_unlock: - mutex_lock(&dev->object_name_lock); + mutex_unlock(&dev->object_name_lock); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); -- cgit v1.2.3 From 807ac202f20aa0a5e991851931dbfa4e4fac558d Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 22 Aug 2013 19:06:08 +0100 Subject: drm: Remove the dithering_mode_property field Unfortunately, I haven't been thorough enough in: commit ddecb10cf402a8325579f298fd4986a90f33496b Author: Lespiau, Damien Date: Tue Aug 20 00:53:04 2013 +0100 drm: Remove drm_mode_create_dithering_property() And forgot to remove the dithering_mode_property member of struct drm_mode_config. Signed-off-by: Damien Lespiau Signed-off-by: Dave Airlie --- include/drm/drm_crtc.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 7987eff5dab2..c4b1e6311467 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -837,7 +837,6 @@ struct drm_mode_config { /* Optional properties */ struct drm_property *scaling_mode_property; - struct drm_property *dithering_mode_property; struct drm_property *dirty_info_property; /* dumb ioctl parameters */ -- cgit v1.2.3 From 9c784855067a8d10cef6088b14a58083e3918fdc Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 28 Aug 2013 12:04:14 +0200 Subject: drm/prime: Remove PRIME handles only if supported Drivers that don't support PRIME will not have initialized the PRIME specific private component of struct drm_file. If called for such drivers, the drm_gem_remove_prime_handles() function will crash. Fix it by checking for PRIME support prior to removing the PRIME handles. 
Signed-off-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index b2d59b2d3acc..49293bdc972a 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -297,7 +297,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); - drm_gem_remove_prime_handles(obj, filp); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, filp); drm_vma_node_revoke(&obj->vma_node, filp->filp); if (dev->driver->gem_close_object) @@ -706,7 +707,8 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct drm_device *dev = obj->dev; - drm_gem_remove_prime_handles(obj, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv->filp); if (dev->driver->gem_close_object) -- cgit v1.2.3 From e1e9c90eefbed92cb2142072918d9c5d1051256f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 22 Aug 2013 15:42:50 +0300 Subject: drm/omap: tiler: clear buffer properly We're taking the sizeof() the wrong thing so it doesn't clear the whole buffer. Signed-off-by: Dan Carpenter Acked-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 9b794c933c81..acf667859cb6 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg) goto error; for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) { - memset(map, 0, sizeof(h_adj * sizeof(*map))); + memset(map, 0, h_adj * sizeof(*map)); memset(global_map, ' ', (w_adj + 1) * h_adj); for (i = 0; i < omap_dmm->container_height; i++) { -- cgit v1.2.3 From ed8d19756e80ec63003a93aa4d70406e6ba61522 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 22 Jul 2013 18:49:58 -0700 Subject: drm: Pass page flip ioctl flags to driver This lets drivers see the flags requested by the application [airlied: fixup for rcar/imx/msm] Signed-off-by: Keith Packard Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 +++-- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/intel_display.c | 23 +++++++++++++++-------- drivers/gpu/drm/msm/mdp4/mdp4_crtc.c | 3 ++- drivers/gpu/drm/nouveau/nouveau_display.c | 3 ++- drivers/gpu/drm/nouveau/nouveau_display.h | 3 ++- drivers/gpu/drm/omapdrm/omap_crtc.c | 3 ++- drivers/gpu/drm/radeon/radeon_display.c | 3 ++- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 3 ++- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 3 ++- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 3 ++- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3 ++- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3 ++- drivers/staging/imx-drm/ipuv3-crtc.c | 3 ++- include/drm/drm_crtc.h | 3 ++- 16 files changed, 45 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 54b4169fc48e..4f35be732b83 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3581,7 +3581,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } old_fb = crtc->fb; - ret = crtc->funcs->page_flip(crtc, fb, e); + ret = 
crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { spin_lock_irqsave(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 9a35d171a6d3..14f5c1d34028 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -184,8 +184,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { }; static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; struct exynos_drm_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 84da3075a84b..d54354421538 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -380,7 +380,8 @@ struct drm_i915_display_funcs { void (*init_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj); + struct drm_i915_gem_object *obj, + uint32_t flags); int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y); void (*hpd_irq_setup)(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 370c902fa629..b52f374d0f00 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7481,7 +7481,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7525,7 +7526,8 @@ err: static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7566,7 +7568,8 @@ err: static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7614,7 +7617,8 @@ err: static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7664,7 +7668,8 @@ err: static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7714,14 +7719,16 @@ err: static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct 
drm_i915_gem_object *obj, + uint32_t flags) { return -ENODEV; } static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7791,7 +7798,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&intel_crtc->unpin_work_count); intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags); if (ret) goto cleanup_pending; diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c index bda0fc40b207..de6bea297cda 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c @@ -371,7 +371,8 @@ static void mdp4_crtc_load_lut(struct drm_crtc *crtc) static int mdp4_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *new_fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_device *dev = crtc->dev; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index dbcf10681ab2..44202bf7b819 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -521,7 +521,8 @@ fail: int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; struct nouveau_drm *drm = nouveau_drm(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index da84f1f40ec2..025c66f8e0ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -61,7 +61,8 @@ void nouveau_display_repin(struct drm_device *dev); void nouveau_display_resume(struct drm_device *dev); int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags); int nouveau_finish_page_flip(struct nouveau_channel *, struct nouveau_page_flip_state *); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 11a5263a5e9f..0fd2eb139f6e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -331,7 +331,8 @@ static void page_flip_cb(void *arg) static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c2b67b4e1ac2..358bd96c06c5 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) static int radeon_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev 
= crtc->dev; struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 33df7a583143..a9d24e4bf792 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -497,7 +497,8 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct drm_device *dev = rcrtc->crtc.dev; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 99e2034e49cc..54bad98e9477 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); struct drm_device *dev = scrtc->crtc.dev; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index fe4726628906..d36efc13b16f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -148,7 +148,8 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc) static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index d4607b2530d6..fc43c0601236 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, int vmw_du_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct drm_framebuffer *old_fb = crtc->fb; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 6fa89c9d6214..8d038c36bd57 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -123,7 +123,8 @@ struct vmw_display_unit { void vmw_display_unit_cleanup(struct vmw_display_unit *du); int vmw_du_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c index 9176a8171e6f..e39690a03e38 100644 --- a/drivers/staging/imx-drm/ipuv3-crtc.c +++ b/drivers/staging/imx-drm/ipuv3-crtc.c @@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) static int ipu_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t 
page_flip_flags) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); int ret; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c4b1e6311467..0c7fec5b8fef 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -363,7 +363,8 @@ struct drm_crtc_funcs { */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); + struct drm_pending_vblank_event *event, + uint32_t flags); int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); -- cgit v1.2.3 From 9bba0c42ec81748462e58b18095c0eef8707cc9a Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 22 Jul 2013 18:49:59 -0700 Subject: drm: Add DRM_MODE_PAGE_FLIP_ASYNC flag definition This requests that the driver perform the page flip as soon as possible, not necessarily waiting for vblank. Signed-off-by: Keith Packard Signed-off-by: Dave Airlie --- include/uapi/drm/drm_mode.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 53db7cea373b..550811712f78 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -412,7 +412,8 @@ struct drm_mode_crtc_lut { }; #define DRM_MODE_PAGE_FLIP_EVENT 0x01 -#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT +#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 +#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) /* * Request a page flip on the specified crtc. @@ -426,11 +427,14 @@ struct drm_mode_crtc_lut { * flip is already pending as the ioctl is called, EBUSY will be * returned. * - * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will - * request that drm sends back a vblank event (see drm.h: struct - * drm_event_vblank) when the page flip is done. The user_data field - * passed in with this ioctl will be returned as the user_data field - * in the vblank event struct. + * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank + * event (see drm.h: struct drm_event_vblank) when the page flip is + * done. The user_data field passed in with this ioctl will be + * returned as the user_data field in the vblank event struct. + * + * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen + * 'as soon as possible', meaning that it not delay waiting for vblank. + * This may cause tearing on the screen. * * The reserved field must be zero until we figure out something * clever to use it for. -- cgit v1.2.3 From 62f2104f3fc11c4cfd1307429cb955bfa48dcb37 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 22 Jul 2013 18:50:00 -0700 Subject: drm: Advertise async page flip ability through GETCAP ioctl Let applications know whether the kernel supports asynchronous page flipping. 
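To show how the new flag and the new capability fit together from user-space (a sketch under assumed values, not part of either commit): a client first queries DRM_CAP_ASYNC_PAGE_FLIP and only sets DRM_MODE_PAGE_FLIP_ASYNC when the cap is reported, since the ioctl rejects the flag with -EINVAL when the driver has not set mode_config.async_page_flip. The crtc_id and fb_id arguments below are placeholders supplied by the caller.

/* Hedged sketch: prefer an async (possibly tearing) flip when the driver advertises it. */
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int queue_flip(int fd, unsigned int crtc_id, unsigned int fb_id)
{
        struct drm_get_cap cap = { .capability = DRM_CAP_ASYNC_PAGE_FLIP };
        struct drm_mode_crtc_page_flip flip = {
                .crtc_id = crtc_id,                     /* placeholder CRTC object id */
                .fb_id = fb_id,                         /* placeholder framebuffer id */
                .flags = DRM_MODE_PAGE_FLIP_EVENT,      /* ask for a completion event */
        };

        if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
                flip.flags |= DRM_MODE_PAGE_FLIP_ASYNC; /* flip as soon as possible, may tear */

        return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}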
Signed-off-by: Keith Packard Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 3 +++ drivers/gpu/drm/drm_ioctl.c | 3 +++ include/drm/drm_crtc.h | 3 +++ include/uapi/drm/drm.h | 1 + 4 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 4f35be732b83..452591b67996 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3508,6 +3508,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, page_flip->reserved != 0) return -EINVAL; + if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) + return -EINVAL; + obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) return -EINVAL; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index cffc7c0e1171..07247e2855a2 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -293,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) case DRM_CAP_TIMESTAMP_MONOTONIC: req->value = drm_timestamp_monotonic; break; + case DRM_CAP_ASYNC_PAGE_FLIP: + req->value = dev->mode_config.async_page_flip; + break; default: return -EINVAL; } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 0c7fec5b8fef..78ca1512c73f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -842,6 +842,9 @@ struct drm_mode_config { /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; + + /* whether async page flip is supported or not */ + bool async_page_flip; }; #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 272580ca320f..ece867889cc7 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -780,6 +780,7 @@ struct drm_event_vblank { #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 #define DRM_CAP_PRIME 0x5 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 +#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 #define DRM_PRIME_CAP_IMPORT 0x1 #define DRM_PRIME_CAP_EXPORT 0x2 -- cgit v1.2.3 From 118bdbd86b39dbb843155054021d2c59058f1e05 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 12 Aug 2013 11:04:29 -0400 Subject: drm/edid: add quirk for Medion MD30217PG This LCD monitor (1280x1024 native) has a completely bogus detailed timing (640x350@70hz). User reports that 1280x1024@60 has waves so prefer 1280x1024@75. 
Manufacturer: MED Model: 7b8 Serial#: 99188 Year: 2005 Week: 5 EDID Version: 1.3 Analog Display Input, Input Voltage Level: 0.700/0.700 V Sync: Separate Max Image Size [cm]: horiz.: 34 vert.: 27 Gamma: 2.50 DPMS capabilities: Off; RGB/Color Display First detailed timing is preferred mode redX: 0.645 redY: 0.348 greenX: 0.280 greenY: 0.605 blueX: 0.142 blueY: 0.071 whiteX: 0.313 whiteY: 0.329 Supported established timings: 720x400@70Hz 640x480@60Hz 640x480@72Hz 640x480@75Hz 800x600@56Hz 800x600@60Hz 800x600@72Hz 800x600@75Hz 1024x768@60Hz 1024x768@70Hz 1024x768@75Hz 1280x1024@75Hz Manufacturer's mask: 0 Supported standard timings: Supported detailed timing: clock: 25.2 MHz Image Size: 337 x 270 mm h_active: 640 h_sync: 688 h_sync_end 784 h_blank_end 800 h_border: 0 v_active: 350 v_sync: 350 v_sync_end 352 v_blanking: 449 v_border: 0 Monitor name: MD30217PG Ranges: V min: 56 V max: 76 Hz, H min: 30 H max: 83 kHz, PixClock max 145 MHz Serial No: 501099188 EDID (in hex): 00ffffffffffff0034a4b80774830100 050f010368221b962a0c55a559479b24 125054afcf00310a0101010101018180 000000000000d60980a0205e63103060 0200510e1100001e000000fc004d4433 3032313750470a202020000000fd0038 4c1e530e000a202020202020000000ff 003530313039393138380a2020200078 Signed-off-by: Alex Deucher Reported-by: friedrich@mailstation.de Cc: stable@vger.kernel.org --- drivers/gpu/drm/drm_edid.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95d6f4b6967c..70fc1335e331 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -125,6 +125,9 @@ static struct edid_quirk { /* ViewSonic VA2026w */ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + + /* Medion MD 30217 PG */ + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, }; /* -- cgit v1.2.3 From 8dddb993bc87b06590f64da5578663386498aafa Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 12 Jul 2013 14:52:30 -0400 Subject: drm/radeon: switch r6xx+ to using CP DMA for the blit copy callback CP DMA is lighter weight than using the 3D engine. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f8f8b3113ddd..1926ec06a638 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1022,7 +1022,7 @@ static struct radeon_asic r600_asic = { .hdmi_setmode = &r600_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1115,7 +1115,7 @@ static struct radeon_asic rv6xx_asic = { .get_backlight_level = &atombios_get_backlight_level, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1226,7 +1226,7 @@ static struct radeon_asic rs780_asic = { .hdmi_setmode = &r600_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r600_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1348,7 +1348,7 @@ static struct radeon_asic rv770_asic = { .hdmi_setmode = &r600_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &rv770_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1473,7 +1473,7 @@ static struct radeon_asic evergreen_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &evergreen_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1598,7 +1598,7 @@ static struct radeon_asic sumo_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &evergreen_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1722,7 +1722,7 @@ static struct radeon_asic btc_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &evergreen_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -1899,7 +1899,7 @@ static struct radeon_asic cayman_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &evergreen_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -2074,7 +2074,7 @@ static struct radeon_asic trinity_asic = { .get_backlight_level = &atombios_get_backlight_level, }, .copy = { - .blit = &r600_copy_blit, + .blit = &r600_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &evergreen_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, -- cgit v1.2.3 From 4f8629675800505c274bf7d17baefed197a76cd9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 5 Aug 2013 12:37:32 -0400 Subject: drm/radeon/kms: remove r6xx+ blit copy routines No longer used now that we use the async dma engines or CP DMA for bo copies. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 4 +- drivers/gpu/drm/radeon/cayman_blit_shaders.c | 54 -- drivers/gpu/drm/radeon/evergreen.c | 8 - drivers/gpu/drm/radeon/evergreen_blit_kms.c | 729 ---------------------- drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 54 -- drivers/gpu/drm/radeon/ni.c | 8 - drivers/gpu/drm/radeon/r600.c | 26 - drivers/gpu/drm/radeon/r600_blit.c | 31 + drivers/gpu/drm/radeon/r600_blit_kms.c | 785 ------------------------ drivers/gpu/drm/radeon/r600_blit_shaders.h | 1 - drivers/gpu/drm/radeon/radeon.h | 30 - drivers/gpu/drm/radeon/radeon_asic.h | 16 - drivers/gpu/drm/radeon/radeon_blit_common.h | 44 -- drivers/gpu/drm/radeon/rv770.c | 7 - 14 files changed, 33 insertions(+), 1764 deletions(-) delete mode 100644 drivers/gpu/drm/radeon/evergreen_blit_kms.c delete mode 100644 drivers/gpu/drm/radeon/r600_blit_kms.c delete mode 100644 drivers/gpu/drm/radeon/radeon_blit_common.h diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index c3df52c1a60c..bfabd69b4e39 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -72,8 +72,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ - r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ + radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ + evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c index 19a0114d2e3b..98d009e154bf 100644 --- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c +++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c @@ -317,58 +317,4 @@ const u32 cayman_default_state[] = 0x00000010, /* */ }; -const u32 cayman_vs[] = -{ - 0x00000004, - 0x80400400, - 0x0000a03c, - 0x95000688, - 0x00004000, - 0x15000688, - 0x00000000, - 0x88000000, - 0x04000000, - 0x67961001, -#ifdef __BIG_ENDIAN - 0x00020000, -#else - 0x00000000, -#endif - 0x00000000, - 0x04000000, - 0x67961000, -#ifdef __BIG_ENDIAN - 0x00020008, -#else - 0x00000008, -#endif - 0x00000000, -}; - -const u32 cayman_ps[] = -{ - 0x00000004, - 0xa00c0000, - 0x00000008, - 0x80400000, - 0x00000000, - 0x95000688, - 0x00000000, - 0x88000000, - 0x00380400, - 0x00146b10, - 0x00380000, - 0x20146b10, - 0x00380400, - 0x40146b00, - 0x80380000, - 0x60146b00, - 0x00000010, - 0x000d1000, - 0xb0800000, - 0x00000000, -}; - -const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps); -const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs); const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d5b49e33315e..a5ab5693eb2a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5144,13 +5144,6 @@ static int evergreen_startup(struct radeon_device *rdev) } evergreen_gpu_init(rdev); - r = evergreen_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy.copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } - /* allocate rlc 
buffers */ if (rdev->flags & RADEON_IS_IGP) { rdev->rlc.reg_list = sumo_rlc_save_restore_register_list; @@ -5420,7 +5413,6 @@ int evergreen_init(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_blit_fini(rdev); r700_cp_fini(rdev); r600_dma_fini(rdev); r600_irq_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c deleted file mode 100644 index 057c87b6515a..000000000000 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ /dev/null @@ -1,729 +0,0 @@ -/* - * Copyright 2010 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * Alex Deucher - */ - -#include -#include -#include "radeon.h" - -#include "evergreend.h" -#include "evergreen_blit_shaders.h" -#include "cayman_blit_shaders.h" -#include "radeon_blit_common.h" - -/* emits 17 */ -static void -set_render_target(struct radeon_device *rdev, int format, - int w, int h, u64 gpu_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 cb_color_info; - int pitch, slice; - - h = ALIGN(h, 8); - if (h < 8) - h = 8; - - cb_color_info = CB_FORMAT(format) | - CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | - CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - pitch = (w / 8) - 1; - slice = ((w * h) / 64) - 1; - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); - radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, pitch); - radeon_ring_write(ring, slice); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, cb_color_info); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, (w - 1) | ((h - 1) << 16)); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); -} - -/* emits 5dw */ -static void -cp_set_surface_sync(struct radeon_device *rdev, - u32 sync_type, u32 size, - u64 mc_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 cp_coher_size; - - if (size == 0xffffffff) - cp_coher_size = 0xffffffff; - else - cp_coher_size = ((size + 255) >> 8); - - if (rdev->family >= CHIP_CAYMAN) { - /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync - * to the RB directly. 
For IBs, the CP programs this as part of the - * surface_sync packet. - */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */ - } - radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, sync_type); - radeon_ring_write(ring, cp_coher_size); - radeon_ring_write(ring, mc_addr >> 8); - radeon_ring_write(ring, 10); /* poll interval */ -} - -/* emits 11dw + 1 surface sync = 16dw */ -static void -set_shaders(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u64 gpu_addr; - - /* VS */ - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); - radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, 2); - radeon_ring_write(ring, 0); - - /* PS */ - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); - radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, 1); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 2); - - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; - cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); -} - -/* emits 10 + 1 sync (5) = 15 */ -static void -set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 sq_vtx_constant_word2, sq_vtx_constant_word3; - - /* high addr, stride */ - sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | - SQ_VTXC_STRIDE(16); -#ifdef __BIG_ENDIAN - sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); -#endif - /* xyzw swizzles */ - sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) | - SQ_VTCX_SEL_Y(SQ_SEL_Y) | - SQ_VTCX_SEL_Z(SQ_SEL_Z) | - SQ_VTCX_SEL_W(SQ_SEL_W); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8)); - radeon_ring_write(ring, 0x580); - radeon_ring_write(ring, gpu_addr & 0xffffffff); - radeon_ring_write(ring, 48 - 1); /* size */ - radeon_ring_write(ring, sq_vtx_constant_word2); - radeon_ring_write(ring, sq_vtx_constant_word3); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER)); - - if ((rdev->family == CHIP_CEDAR) || - (rdev->family == CHIP_PALM) || - (rdev->family == CHIP_SUMO) || - (rdev->family == CHIP_SUMO2) || - (rdev->family == CHIP_CAICOS)) - cp_set_surface_sync(rdev, - PACKET3_TC_ACTION_ENA, 48, gpu_addr); - else - cp_set_surface_sync(rdev, - PACKET3_VC_ACTION_ENA, 48, gpu_addr); - -} - -/* emits 10 */ -static void -set_tex_resource(struct radeon_device *rdev, - int format, int w, int h, int pitch, - u64 gpu_addr, u32 size) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 sq_tex_resource_word0, sq_tex_resource_word1; - u32 sq_tex_resource_word4, sq_tex_resource_word7; - - if (h < 1) - h = 1; - - sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D); - sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | - ((w - 1) << 18)); - sq_tex_resource_word1 = ((h - 1) << 0) | - TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - /* xyzw swizzles */ - sq_tex_resource_word4 = 
TEX_DST_SEL_X(SQ_SEL_X) | - TEX_DST_SEL_Y(SQ_SEL_Y) | - TEX_DST_SEL_Z(SQ_SEL_Z) | - TEX_DST_SEL_W(SQ_SEL_W); - - sq_tex_resource_word7 = format | - S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); - - cp_set_surface_sync(rdev, - PACKET3_TC_ACTION_ENA, size, gpu_addr); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8)); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, sq_tex_resource_word0); - radeon_ring_write(ring, sq_tex_resource_word1); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, sq_tex_resource_word4); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, sq_tex_resource_word7); -} - -/* emits 12 */ -static void -set_scissors(struct radeon_device *rdev, int x1, int y1, - int x2, int y2) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - /* workaround some hw bugs */ - if (x2 == 0) - x1 = 1; - if (y2 == 0) - y1 = 1; - if (rdev->family >= CHIP_CAYMAN) { - if ((x2 == 1) && (y2 == 1)) - x2 = 2; - } - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); -} - -/* emits 10 */ -static void -draw_auto(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, DI_PT_RECTLIST); - - radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); - radeon_ring_write(ring, -#ifdef __BIG_ENDIAN - (2 << 2) | -#endif - DI_INDEX_SIZE_16_BIT); - - radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); - radeon_ring_write(ring, 1); - - radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); - radeon_ring_write(ring, 3); - radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); - -} - -/* emits 39 */ -static void -set_default_state(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; - u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; - u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; - int num_ps_gprs, num_vs_gprs, num_temp_gprs; - int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; - int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; - int num_hs_threads, num_ls_threads; - int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; - int num_hs_stack_entries, num_ls_stack_entries; - u64 gpu_addr; - int dwords; - - /* set clear context state */ - radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); - radeon_ring_write(ring, 0); - - if (rdev->family < CHIP_CAYMAN) { - switch (rdev->family) { - case 
CHIP_CEDAR: - default: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 96; - num_vs_threads = 16; - num_gs_threads = 16; - num_es_threads = 16; - num_hs_threads = 16; - num_ls_threads = 16; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - case CHIP_REDWOOD: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 128; - num_vs_threads = 20; - num_gs_threads = 20; - num_es_threads = 20; - num_hs_threads = 20; - num_ls_threads = 20; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - case CHIP_JUNIPER: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 128; - num_vs_threads = 20; - num_gs_threads = 20; - num_es_threads = 20; - num_hs_threads = 20; - num_ls_threads = 20; - num_ps_stack_entries = 85; - num_vs_stack_entries = 85; - num_gs_stack_entries = 85; - num_es_stack_entries = 85; - num_hs_stack_entries = 85; - num_ls_stack_entries = 85; - break; - case CHIP_CYPRESS: - case CHIP_HEMLOCK: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 128; - num_vs_threads = 20; - num_gs_threads = 20; - num_es_threads = 20; - num_hs_threads = 20; - num_ls_threads = 20; - num_ps_stack_entries = 85; - num_vs_stack_entries = 85; - num_gs_stack_entries = 85; - num_es_stack_entries = 85; - num_hs_stack_entries = 85; - num_ls_stack_entries = 85; - break; - case CHIP_PALM: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 96; - num_vs_threads = 16; - num_gs_threads = 16; - num_es_threads = 16; - num_hs_threads = 16; - num_ls_threads = 16; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - case CHIP_SUMO: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 96; - num_vs_threads = 25; - num_gs_threads = 25; - num_es_threads = 25; - num_hs_threads = 25; - num_ls_threads = 25; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - case CHIP_SUMO2: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 96; - num_vs_threads = 25; - num_gs_threads = 25; - num_es_threads = 25; - num_hs_threads = 25; - num_ls_threads = 25; - num_ps_stack_entries = 85; - num_vs_stack_entries = 85; - num_gs_stack_entries = 85; - num_es_stack_entries = 85; - num_hs_stack_entries = 85; - num_ls_stack_entries = 85; - break; - case CHIP_BARTS: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 
23; - num_ps_threads = 128; - num_vs_threads = 20; - num_gs_threads = 20; - num_es_threads = 20; - num_hs_threads = 20; - num_ls_threads = 20; - num_ps_stack_entries = 85; - num_vs_stack_entries = 85; - num_gs_stack_entries = 85; - num_es_stack_entries = 85; - num_hs_stack_entries = 85; - num_ls_stack_entries = 85; - break; - case CHIP_TURKS: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 128; - num_vs_threads = 20; - num_gs_threads = 20; - num_es_threads = 20; - num_hs_threads = 20; - num_ls_threads = 20; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - case CHIP_CAICOS: - num_ps_gprs = 93; - num_vs_gprs = 46; - num_temp_gprs = 4; - num_gs_gprs = 31; - num_es_gprs = 31; - num_hs_gprs = 23; - num_ls_gprs = 23; - num_ps_threads = 128; - num_vs_threads = 10; - num_gs_threads = 10; - num_es_threads = 10; - num_hs_threads = 10; - num_ls_threads = 10; - num_ps_stack_entries = 42; - num_vs_stack_entries = 42; - num_gs_stack_entries = 42; - num_es_stack_entries = 42; - num_hs_stack_entries = 42; - num_ls_stack_entries = 42; - break; - } - - if ((rdev->family == CHIP_CEDAR) || - (rdev->family == CHIP_PALM) || - (rdev->family == CHIP_SUMO) || - (rdev->family == CHIP_SUMO2) || - (rdev->family == CHIP_CAICOS)) - sq_config = 0; - else - sq_config = VC_ENABLE; - - sq_config |= (EXPORT_SRC_C | - CS_PRIO(0) | - LS_PRIO(0) | - HS_PRIO(0) | - PS_PRIO(0) | - VS_PRIO(1) | - GS_PRIO(2) | - ES_PRIO(3)); - - sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | - NUM_VS_GPRS(num_vs_gprs) | - NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); - sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | - NUM_ES_GPRS(num_es_gprs)); - sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) | - NUM_LS_GPRS(num_ls_gprs)); - sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | - NUM_VS_THREADS(num_vs_threads) | - NUM_GS_THREADS(num_gs_threads) | - NUM_ES_THREADS(num_es_threads)); - sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) | - NUM_LS_THREADS(num_ls_threads)); - sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | - NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); - sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | - NUM_ES_STACK_ENTRIES(num_es_stack_entries)); - sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | - NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); - - /* disable dyn gprs */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, 0); - - /* setup LDS */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, 0x10001000); - - /* SQ config */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11)); - radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, sq_config); - radeon_ring_write(ring, sq_gpr_resource_mgmt_1); - radeon_ring_write(ring, sq_gpr_resource_mgmt_2); - radeon_ring_write(ring, sq_gpr_resource_mgmt_3); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, sq_thread_resource_mgmt); - radeon_ring_write(ring, sq_thread_resource_mgmt_2); - radeon_ring_write(ring, 
sq_stack_resource_mgmt_1); - radeon_ring_write(ring, sq_stack_resource_mgmt_2); - radeon_ring_write(ring, sq_stack_resource_mgmt_3); - } - - /* CONTEXT_CONTROL */ - radeon_ring_write(ring, 0xc0012800); - radeon_ring_write(ring, 0x80000000); - radeon_ring_write(ring, 0x80000000); - - /* SQ_VTX_BASE_VTX_LOC */ - radeon_ring_write(ring, 0xc0026f00); - radeon_ring_write(ring, 0x00000000); - radeon_ring_write(ring, 0x00000000); - radeon_ring_write(ring, 0x00000000); - - /* SET_SAMPLER */ - radeon_ring_write(ring, 0xc0036e00); - radeon_ring_write(ring, 0x00000000); - radeon_ring_write(ring, 0x00000012); - radeon_ring_write(ring, 0x00000000); - radeon_ring_write(ring, 0x00000000); - - /* set to DX10/11 mode */ - radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); - radeon_ring_write(ring, 1); - - /* emit an IB pointing at default state */ - dwords = ALIGN(rdev->r600_blit.state_len, 0x10); - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; - radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC); - radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); - radeon_ring_write(ring, dwords); - -} - -int evergreen_blit_init(struct radeon_device *rdev) -{ - u32 obj_size; - int i, r, dwords; - void *ptr; - u32 packet2s[16]; - int num_packet2s = 0; - - rdev->r600_blit.primitives.set_render_target = set_render_target; - rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; - rdev->r600_blit.primitives.set_shaders = set_shaders; - rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; - rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; - rdev->r600_blit.primitives.set_scissors = set_scissors; - rdev->r600_blit.primitives.draw_auto = draw_auto; - rdev->r600_blit.primitives.set_default_state = set_default_state; - - rdev->r600_blit.ring_size_common = 8; /* sync semaphore */ - rdev->r600_blit.ring_size_common += 55; /* shaders + def state */ - rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ - rdev->r600_blit.ring_size_common += 5; /* done copy */ - rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ - - rdev->r600_blit.ring_size_per_loop = 74; - if (rdev->family >= CHIP_CAYMAN) - rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */ - - rdev->r600_blit.max_dim = 16384; - - rdev->r600_blit.state_offset = 0; - - if (rdev->family < CHIP_CAYMAN) - rdev->r600_blit.state_len = evergreen_default_size; - else - rdev->r600_blit.state_len = cayman_default_size; - - dwords = rdev->r600_blit.state_len; - while (dwords & 0xf) { - packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); - dwords++; - } - - obj_size = dwords * 4; - obj_size = ALIGN(obj_size, 256); - - rdev->r600_blit.vs_offset = obj_size; - if (rdev->family < CHIP_CAYMAN) - obj_size += evergreen_vs_size * 4; - else - obj_size += cayman_vs_size * 4; - obj_size = ALIGN(obj_size, 256); - - rdev->r600_blit.ps_offset = obj_size; - if (rdev->family < CHIP_CAYMAN) - obj_size += evergreen_ps_size * 4; - else - obj_size += cayman_ps_size * 4; - obj_size = ALIGN(obj_size, 256); - - /* pin copy shader into vram if not already initialized */ - if (!rdev->r600_blit.shader_obj) { - r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("evergreen failed to allocate shader\n"); - return r; - } - - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = 
radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } - } - - DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", - obj_size, - rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); - - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); - if (r) { - DRM_ERROR("failed to map blit object %d\n", r); - return r; - } - - if (rdev->family < CHIP_CAYMAN) { - memcpy_toio(ptr + rdev->r600_blit.state_offset, - evergreen_default_state, rdev->r600_blit.state_len * 4); - - if (num_packet2s) - memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), - packet2s, num_packet2s * 4); - for (i = 0; i < evergreen_vs_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); - for (i = 0; i < evergreen_ps_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); - } else { - memcpy_toio(ptr + rdev->r600_blit.state_offset, - cayman_default_state, rdev->r600_blit.state_len * 4); - - if (num_packet2s) - memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), - packet2s, num_packet2s * 4); - for (i = 0; i < cayman_vs_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]); - for (i = 0; i < cayman_ps_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]); - } - radeon_bo_kunmap(rdev->r600_blit.shader_obj); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - return 0; -} diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c index f85c0af115b5..d43383470cdf 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c @@ -300,58 +300,4 @@ const u32 evergreen_default_state[] = 0x00000010, /* */ }; -const u32 evergreen_vs[] = -{ - 0x00000004, - 0x80800400, - 0x0000a03c, - 0x95000688, - 0x00004000, - 0x15200688, - 0x00000000, - 0x00000000, - 0x3c000000, - 0x67961001, -#ifdef __BIG_ENDIAN - 0x000a0000, -#else - 0x00080000, -#endif - 0x00000000, - 0x1c000000, - 0x67961000, -#ifdef __BIG_ENDIAN - 0x00020008, -#else - 0x00000008, -#endif - 0x00000000, -}; - -const u32 evergreen_ps[] = -{ - 0x00000003, - 0xa00c0000, - 0x00000008, - 0x80400000, - 0x00000000, - 0x95200688, - 0x00380400, - 0x00146b10, - 0x00380000, - 0x20146b10, - 0x00380400, - 0x40146b00, - 0x80380000, - 0x60146b00, - 0x00000000, - 0x00000000, - 0x00000010, - 0x000d1000, - 0xb0800000, - 0x00000000, -}; - -const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); -const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ccb4f8b54852..acdd6039ef14 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2118,13 +2118,6 @@ static int cayman_startup(struct radeon_device *rdev) return r; cayman_gpu_init(rdev); - r = evergreen_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy.copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - 
} - /* allocate rlc buffers */ if (rdev->flags & RADEON_IS_IGP) { rdev->rlc.reg_list = tn_rlc_save_restore_register_list; @@ -2413,7 +2406,6 @@ int cayman_init(struct radeon_device *rdev) void cayman_fini(struct radeon_device *rdev) { - r600_blit_fini(rdev); cayman_cp_fini(rdev); cayman_dma_fini(rdev); r600_irq_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e66e72077350..3db2e4ddb2d6 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3136,25 +3136,6 @@ void r600_uvd_semaphore_emit(struct radeon_device *rdev, radeon_ring_write(ring, emit_wait ? 1 : 0); } -int r600_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - struct radeon_sa_bo *vb = NULL; - int r; - - r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem); - if (r) { - return r; - } - r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb); - r600_blit_done_copy(rdev, fence, vb, sem); - return 0; -} - /** * r600_copy_cpdma - copy pages using the CP DMA engine * @@ -3356,12 +3337,6 @@ static int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy.copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* allocate wb buffer */ r = radeon_wb_init(rdev); @@ -3574,7 +3549,6 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_blit_fini(rdev); r600_cp_fini(rdev); r600_dma_fini(rdev); r600_irq_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index f651881eb0ae..daf7572be976 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -31,6 +31,37 @@ #include "r600_blit_shaders.h" +/* 23 bits of float fractional data */ +#define I2F_FRAC_BITS 23 +#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1) + +/* + * Converts unsigned integer into 32-bit IEEE floating point representation. + * Will be exact from 0 to 2^24. Above that, we round towards zero + * as the fractional bits will not fit in a float. (It would be better to + * round towards even as the fpu does, but that is slower.) + */ +static __pure uint32_t int2float(uint32_t x) +{ + uint32_t msb, exponent, fraction; + + /* Zero is special */ + if (!x) return 0; + + /* Get location of the most significant bit */ + msb = __fls(x); + + /* + * Use a rotate instead of a shift because that works both leftwards + * and rightwards due to the mod(32) behaviour. This means we don't + * need to check to see if we are above 2^24 or not. + */ + fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK; + exponent = (127 + msb) << I2F_FRAC_BITS; + + return fraction + exponent; +} + #define DI_PT_RECTLIST 0x11 #define DI_INDEX_SIZE_16_BIT 0x0 #define DI_SRC_SEL_AUTO_INDEX 0x2 diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c deleted file mode 100644 index 9fb5780a552f..000000000000 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ /dev/null @@ -1,785 +0,0 @@ -/* - * Copyright 2009 Advanced Micro Devices, Inc. - * Copyright 2009 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include -#include "radeon.h" - -#include "r600d.h" -#include "r600_blit_shaders.h" -#include "radeon_blit_common.h" - -/* 23 bits of float fractional data */ -#define I2F_FRAC_BITS 23 -#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1) - -/* - * Converts unsigned integer into 32-bit IEEE floating point representation. - * Will be exact from 0 to 2^24. Above that, we round towards zero - * as the fractional bits will not fit in a float. (It would be better to - * round towards even as the fpu does, but that is slower.) - */ -__pure uint32_t int2float(uint32_t x) -{ - uint32_t msb, exponent, fraction; - - /* Zero is special */ - if (!x) return 0; - - /* Get location of the most significant bit */ - msb = __fls(x); - - /* - * Use a rotate instead of a shift because that works both leftwards - * and rightwards due to the mod(32) behaviour. This means we don't - * need to check to see if we are above 2^24 or not. 
- */ - fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK; - exponent = (127 + msb) << I2F_FRAC_BITS; - - return fraction + exponent; -} - -/* emits 21 on rv770+, 23 on r600 */ -static void -set_render_target(struct radeon_device *rdev, int format, - int w, int h, u64 gpu_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 cb_color_info; - int pitch, slice; - - h = ALIGN(h, 8); - if (h < 8) - h = 8; - - cb_color_info = CB_FORMAT(format) | - CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | - CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - pitch = (w / 8) - 1; - slice = ((w * h) / 64) - 1; - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - - if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { - radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); - radeon_ring_write(ring, 2 << 0); - } - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (pitch << 0) | (slice << 10)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, cb_color_info); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 0); -} - -/* emits 5dw */ -static void -cp_set_surface_sync(struct radeon_device *rdev, - u32 sync_type, u32 size, - u64 mc_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 cp_coher_size; - - if (size == 0xffffffff) - cp_coher_size = 0xffffffff; - else - cp_coher_size = ((size + 255) >> 8); - - radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, sync_type); - radeon_ring_write(ring, cp_coher_size); - radeon_ring_write(ring, mc_addr >> 8); - radeon_ring_write(ring, 10); /* poll interval */ -} - -/* emits 21dw + 1 surface sync = 26dw */ -static void -set_shaders(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u64 gpu_addr; - u32 sq_pgm_resources; - - /* setup shader regs */ - sq_pgm_resources = (1 << 0); - - /* VS */ - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, sq_pgm_resources); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 
2); - radeon_ring_write(ring, 0); - - /* PS */ - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, gpu_addr >> 8); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, sq_pgm_resources | (1 << 28)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 2); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, 0); - - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; - cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); -} - -/* emits 9 + 1 sync (5) = 14*/ -static void -set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 sq_vtx_constant_word2; - - sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | - SQ_VTXC_STRIDE(16); -#ifdef __BIG_ENDIAN - sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); -#endif - - radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); - radeon_ring_write(ring, 0x460); - radeon_ring_write(ring, gpu_addr & 0xffffffff); - radeon_ring_write(ring, 48 - 1); - radeon_ring_write(ring, sq_vtx_constant_word2); - radeon_ring_write(ring, 1 << 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30); - - if ((rdev->family == CHIP_RV610) || - (rdev->family == CHIP_RV620) || - (rdev->family == CHIP_RS780) || - (rdev->family == CHIP_RS880) || - (rdev->family == CHIP_RV710)) - cp_set_surface_sync(rdev, - PACKET3_TC_ACTION_ENA, 48, gpu_addr); - else - cp_set_surface_sync(rdev, - PACKET3_VC_ACTION_ENA, 48, gpu_addr); -} - -/* emits 9 */ -static void -set_tex_resource(struct radeon_device *rdev, - int format, int w, int h, int pitch, - u64 gpu_addr, u32 size) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; - - if (h < 1) - h = 1; - - sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) | - S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); - sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) | - S_038000_TEX_WIDTH(w - 1); - - sq_tex_resource_word1 = S_038004_DATA_FORMAT(format); - sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1); - - sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) | - S_038010_DST_SEL_X(SQ_SEL_X) | - S_038010_DST_SEL_Y(SQ_SEL_Y) | - S_038010_DST_SEL_Z(SQ_SEL_Z) | - S_038010_DST_SEL_W(SQ_SEL_W); - - cp_set_surface_sync(rdev, - PACKET3_TC_ACTION_ENA, size, gpu_addr); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, sq_tex_resource_word0); - radeon_ring_write(ring, sq_tex_resource_word1); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, gpu_addr >> 8); - radeon_ring_write(ring, sq_tex_resource_word4); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30); -} - -/* emits 12 */ -static void -set_scissors(struct radeon_device *rdev, int x1, int y1, 
- int x2, int y2) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); - - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); - radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); -} - -/* emits 10 */ -static void -draw_auto(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); - radeon_ring_write(ring, DI_PT_RECTLIST); - - radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); - radeon_ring_write(ring, -#ifdef __BIG_ENDIAN - (2 << 2) | -#endif - DI_INDEX_SIZE_16_BIT); - - radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); - radeon_ring_write(ring, 1); - - radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); - radeon_ring_write(ring, 3); - radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); - -} - -/* emits 14 */ -static void -set_default_state(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; - u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; - int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; - int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; - int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; - u64 gpu_addr; - int dwords; - - switch (rdev->family) { - case CHIP_R600: - num_ps_gprs = 192; - num_vs_gprs = 56; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 136; - num_vs_threads = 48; - num_gs_threads = 4; - num_es_threads = 4; - num_ps_stack_entries = 128; - num_vs_stack_entries = 128; - num_gs_stack_entries = 0; - num_es_stack_entries = 0; - break; - case CHIP_RV630: - case CHIP_RV635: - num_ps_gprs = 84; - num_vs_gprs = 36; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 144; - num_vs_threads = 40; - num_gs_threads = 4; - num_es_threads = 4; - num_ps_stack_entries = 40; - num_vs_stack_entries = 40; - num_gs_stack_entries = 32; - num_es_stack_entries = 16; - break; - case CHIP_RV610: - case CHIP_RV620: - case CHIP_RS780: - case CHIP_RS880: - default: - num_ps_gprs = 84; - num_vs_gprs = 36; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 136; - num_vs_threads = 48; - num_gs_threads = 4; - num_es_threads = 4; - num_ps_stack_entries = 40; - num_vs_stack_entries = 40; - num_gs_stack_entries = 32; - num_es_stack_entries = 16; - break; - case CHIP_RV670: - num_ps_gprs = 144; - num_vs_gprs = 40; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 136; - num_vs_threads = 48; - num_gs_threads = 4; - num_es_threads = 4; - num_ps_stack_entries = 40; - 
num_vs_stack_entries = 40; - num_gs_stack_entries = 32; - num_es_stack_entries = 16; - break; - case CHIP_RV770: - num_ps_gprs = 192; - num_vs_gprs = 56; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 188; - num_vs_threads = 60; - num_gs_threads = 0; - num_es_threads = 0; - num_ps_stack_entries = 256; - num_vs_stack_entries = 256; - num_gs_stack_entries = 0; - num_es_stack_entries = 0; - break; - case CHIP_RV730: - case CHIP_RV740: - num_ps_gprs = 84; - num_vs_gprs = 36; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 188; - num_vs_threads = 60; - num_gs_threads = 0; - num_es_threads = 0; - num_ps_stack_entries = 128; - num_vs_stack_entries = 128; - num_gs_stack_entries = 0; - num_es_stack_entries = 0; - break; - case CHIP_RV710: - num_ps_gprs = 192; - num_vs_gprs = 56; - num_temp_gprs = 4; - num_gs_gprs = 0; - num_es_gprs = 0; - num_ps_threads = 144; - num_vs_threads = 48; - num_gs_threads = 0; - num_es_threads = 0; - num_ps_stack_entries = 128; - num_vs_stack_entries = 128; - num_gs_stack_entries = 0; - num_es_stack_entries = 0; - break; - } - - if ((rdev->family == CHIP_RV610) || - (rdev->family == CHIP_RV620) || - (rdev->family == CHIP_RS780) || - (rdev->family == CHIP_RS880) || - (rdev->family == CHIP_RV710)) - sq_config = 0; - else - sq_config = VC_ENABLE; - - sq_config |= (DX9_CONSTS | - ALU_INST_PREFER_VECTOR | - PS_PRIO(0) | - VS_PRIO(1) | - GS_PRIO(2) | - ES_PRIO(3)); - - sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | - NUM_VS_GPRS(num_vs_gprs) | - NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); - sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | - NUM_ES_GPRS(num_es_gprs)); - sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | - NUM_VS_THREADS(num_vs_threads) | - NUM_GS_THREADS(num_gs_threads) | - NUM_ES_THREADS(num_es_threads)); - sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | - NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); - sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | - NUM_ES_STACK_ENTRIES(num_es_stack_entries)); - - /* emit an IB pointing at default state */ - dwords = ALIGN(rdev->r600_blit.state_len, 0x10); - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; - radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(ring, -#ifdef __BIG_ENDIAN - (2 << 0) | -#endif - (gpu_addr & 0xFFFFFFFC)); - radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); - radeon_ring_write(ring, dwords); - - /* SQ config */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6)); - radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); - radeon_ring_write(ring, sq_config); - radeon_ring_write(ring, sq_gpr_resource_mgmt_1); - radeon_ring_write(ring, sq_gpr_resource_mgmt_2); - radeon_ring_write(ring, sq_thread_resource_mgmt); - radeon_ring_write(ring, sq_stack_resource_mgmt_1); - radeon_ring_write(ring, sq_stack_resource_mgmt_2); -} - -int r600_blit_init(struct radeon_device *rdev) -{ - u32 obj_size; - int i, r, dwords; - void *ptr; - u32 packet2s[16]; - int num_packet2s = 0; - - rdev->r600_blit.primitives.set_render_target = set_render_target; - rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; - rdev->r600_blit.primitives.set_shaders = set_shaders; - rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; - rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; - rdev->r600_blit.primitives.set_scissors = set_scissors; - rdev->r600_blit.primitives.draw_auto = 
draw_auto; - rdev->r600_blit.primitives.set_default_state = set_default_state; - - rdev->r600_blit.ring_size_common = 8; /* sync semaphore */ - rdev->r600_blit.ring_size_common += 40; /* shaders + def state */ - rdev->r600_blit.ring_size_common += 5; /* done copy */ - rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ - - rdev->r600_blit.ring_size_per_loop = 76; - /* set_render_target emits 2 extra dwords on rv6xx */ - if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) - rdev->r600_blit.ring_size_per_loop += 2; - - rdev->r600_blit.max_dim = 8192; - - rdev->r600_blit.state_offset = 0; - - if (rdev->family >= CHIP_RV770) - rdev->r600_blit.state_len = r7xx_default_size; - else - rdev->r600_blit.state_len = r6xx_default_size; - - dwords = rdev->r600_blit.state_len; - while (dwords & 0xf) { - packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); - dwords++; - } - - obj_size = dwords * 4; - obj_size = ALIGN(obj_size, 256); - - rdev->r600_blit.vs_offset = obj_size; - obj_size += r6xx_vs_size * 4; - obj_size = ALIGN(obj_size, 256); - - rdev->r600_blit.ps_offset = obj_size; - obj_size += r6xx_ps_size * 4; - obj_size = ALIGN(obj_size, 256); - - /* pin copy shader into vram if not already initialized */ - if (rdev->r600_blit.shader_obj == NULL) { - r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("r600 failed to allocate shader\n"); - return r; - } - - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } - } - - DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", - obj_size, - rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); - - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); - if (r) { - DRM_ERROR("failed to map blit object %d\n", r); - return r; - } - if (rdev->family >= CHIP_RV770) - memcpy_toio(ptr + rdev->r600_blit.state_offset, - r7xx_default_state, rdev->r600_blit.state_len * 4); - else - memcpy_toio(ptr + rdev->r600_blit.state_offset, - r6xx_default_state, rdev->r600_blit.state_len * 4); - if (num_packet2s) - memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), - packet2s, num_packet2s * 4); - for (i = 0; i < r6xx_vs_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); - for (i = 0; i < r6xx_ps_size; i++) - *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); - radeon_bo_kunmap(rdev->r600_blit.shader_obj); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - return 0; -} - -void r600_blit_fini(struct radeon_device *rdev) -{ - int r; - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - if (rdev->r600_blit.shader_obj == NULL) - return; - /* If we can't reserve the bo, unref should be enough to destroy - * it when it becomes idle. 
- */ - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (!r) { - radeon_bo_unpin(rdev->r600_blit.shader_obj); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - } - radeon_bo_unref(&rdev->r600_blit.shader_obj); -} - -static unsigned r600_blit_create_rect(unsigned num_gpu_pages, - int *width, int *height, int max_dim) -{ - unsigned max_pages; - unsigned pages = num_gpu_pages; - int w, h; - - if (num_gpu_pages == 0) { - /* not supposed to be called with no pages, but just in case */ - h = 0; - w = 0; - pages = 0; - WARN_ON(1); - } else { - int rect_order = 2; - h = RECT_UNIT_H; - while (num_gpu_pages / rect_order) { - h *= 2; - rect_order *= 4; - if (h >= max_dim) { - h = max_dim; - break; - } - } - max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H); - if (pages > max_pages) - pages = max_pages; - w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h; - w = (w / RECT_UNIT_W) * RECT_UNIT_W; - pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H); - BUG_ON(pages == 0); - } - - - DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages); - - /* return width and height only of the caller wants it */ - if (height) - *height = h; - if (width) - *width = w; - - return pages; -} - - -int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, - struct radeon_fence **fence, struct radeon_sa_bo **vb, - struct radeon_semaphore **sem) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - int r; - int ring_size; - int num_loops = 0; - int dwords_per_loop = rdev->r600_blit.ring_size_per_loop; - - /* num loops */ - while (num_gpu_pages) { - num_gpu_pages -= - r600_blit_create_rect(num_gpu_pages, NULL, NULL, - rdev->r600_blit.max_dim); - num_loops++; - } - - /* 48 bytes for vertex per loop */ - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb, - (num_loops*48)+256, 256, true); - if (r) { - return r; - } - - r = radeon_semaphore_create(rdev, sem); - if (r) { - radeon_sa_bo_free(rdev, vb, NULL); - return r; - } - - /* calculate number of loops correctly */ - ring_size = num_loops * dwords_per_loop; - ring_size += rdev->r600_blit.ring_size_common; - r = radeon_ring_lock(rdev, ring, ring_size); - if (r) { - radeon_sa_bo_free(rdev, vb, NULL); - radeon_semaphore_free(rdev, sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) { - radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring, - RADEON_RING_TYPE_GFX_INDEX); - radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX); - } else { - radeon_semaphore_free(rdev, sem, NULL); - } - - rdev->r600_blit.primitives.set_default_state(rdev); - rdev->r600_blit.primitives.set_shaders(rdev); - return 0; -} - -void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, - struct radeon_sa_bo *vb, struct radeon_semaphore *sem) -{ - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - int r; - - r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_sa_bo_free(rdev, &vb, *fence); - radeon_semaphore_free(rdev, &sem, *fence); -} - -void r600_kms_blit_copy(struct radeon_device *rdev, - u64 src_gpu_addr, u64 dst_gpu_addr, - unsigned num_gpu_pages, - struct radeon_sa_bo *vb) -{ - u64 vb_gpu_addr; - u32 *vb_cpu_addr; - - DRM_DEBUG("emitting copy %16llx %16llx %d\n", - src_gpu_addr, dst_gpu_addr, num_gpu_pages); - vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb); - vb_gpu_addr = radeon_sa_bo_gpu_addr(vb); - - while (num_gpu_pages) 
{ - int w, h; - unsigned size_in_bytes; - unsigned pages_per_loop = - r600_blit_create_rect(num_gpu_pages, &w, &h, - rdev->r600_blit.max_dim); - - size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; - DRM_DEBUG("rectangle w=%d h=%d\n", w, h); - - vb_cpu_addr[0] = 0; - vb_cpu_addr[1] = 0; - vb_cpu_addr[2] = 0; - vb_cpu_addr[3] = 0; - - vb_cpu_addr[4] = 0; - vb_cpu_addr[5] = int2float(h); - vb_cpu_addr[6] = 0; - vb_cpu_addr[7] = int2float(h); - - vb_cpu_addr[8] = int2float(w); - vb_cpu_addr[9] = int2float(h); - vb_cpu_addr[10] = int2float(w); - vb_cpu_addr[11] = int2float(h); - - rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, - w, h, w, src_gpu_addr, size_in_bytes); - rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, - w, h, dst_gpu_addr); - rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); - rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); - rdev->r600_blit.primitives.draw_auto(rdev); - rdev->r600_blit.primitives.cp_set_surface_sync(rdev, - PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, - size_in_bytes, dst_gpu_addr); - - vb_cpu_addr += 12; - vb_gpu_addr += 4*12; - src_gpu_addr += size_in_bytes; - dst_gpu_addr += size_in_bytes; - num_gpu_pages -= pages_per_loop; - } -} diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h index 2f3ce7a75976..f437d36dd98c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.h +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h @@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[]; extern const u32 r6xx_ps_size, r6xx_vs_size; extern const u32 r6xx_default_size, r7xx_default_size; -__pure uint32_t int2float(uint32_t x); #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9f19259667df..83be8fdceab1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -844,35 +844,6 @@ struct r600_ih { bool enabled; }; -struct r600_blit_cp_primitives { - void (*set_render_target)(struct radeon_device *rdev, int format, - int w, int h, u64 gpu_addr); - void (*cp_set_surface_sync)(struct radeon_device *rdev, - u32 sync_type, u32 size, - u64 mc_addr); - void (*set_shaders)(struct radeon_device *rdev); - void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); - void (*set_tex_resource)(struct radeon_device *rdev, - int format, int w, int h, int pitch, - u64 gpu_addr, u32 size); - void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, - int x2, int y2); - void (*draw_auto)(struct radeon_device *rdev); - void (*set_default_state)(struct radeon_device *rdev); -}; - -struct r600_blit { - struct radeon_bo *shader_obj; - struct r600_blit_cp_primitives primitives; - int max_dim; - int ring_size_common; - int ring_size_per_loop; - u64 shader_gpu_addr; - u32 vs_offset, ps_offset; - u32 state_offset; - u32 state_len; -}; - /* * RLC stuff */ @@ -2066,7 +2037,6 @@ struct radeon_device { const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ const struct firmware *uvd_fw; /* UVD firmware */ - struct r600_blit r600_blit; struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d61d5aac18f..3cf7d89c1bd8 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -337,9 +337,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); -int r600_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence **fence); int r600_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); @@ -371,8 +368,6 @@ int r600_count_pipe_bits(uint32_t val); int r600_mc_wait_for_idle(struct radeon_device *rdev); int r600_pcie_gart_init(struct radeon_device *rdev); void r600_scratch_init(struct radeon_device *rdev); -int r600_blit_init(struct radeon_device *rdev); -void r600_blit_fini(struct radeon_device *rdev); int r600_init_microcode(struct radeon_device *rdev); /* r600 irq */ int r600_irq_process(struct radeon_device *rdev); @@ -391,16 +386,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); -/* r600 blit */ -int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, - struct radeon_fence **fence, struct radeon_sa_bo **vb, - struct radeon_semaphore **sem); -void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, - struct radeon_sa_bo *vb, struct radeon_semaphore *sem); -void r600_kms_blit_copy(struct radeon_device *rdev, - u64 src_gpu_addr, u64 dst_gpu_addr, - unsigned num_gpu_pages, - struct radeon_sa_bo *vb); int r600_mc_wait_for_idle(struct radeon_device *rdev); u32 r600_get_xclk(struct radeon_device *rdev); uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); @@ -530,7 +515,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); void evergreen_disable_interrupt_state(struct radeon_device *rdev); -int evergreen_blit_init(struct radeon_device *rdev); int evergreen_mc_wait_for_idle(struct radeon_device *rdev); void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h deleted file mode 100644 index 4ecbe72c9d2d..000000000000 --- a/drivers/gpu/drm/radeon/radeon_blit_common.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2009 Advanced Micro Devices, Inc. - * Copyright 2009 Red Hat Inc. - * Copyright 2012 Alcatel-Lucent, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __RADEON_BLIT_COMMON_H__ - -#define DI_PT_RECTLIST 0x11 -#define DI_INDEX_SIZE_16_BIT 0x0 -#define DI_SRC_SEL_AUTO_INDEX 0x2 - -#define FMT_8 0x1 -#define FMT_5_6_5 0x8 -#define FMT_8_8_8_8 0x1a -#define COLOR_8 0x1 -#define COLOR_5_6_5 0x8 -#define COLOR_8_8_8_8 0x1a - -#define RECT_UNIT_H 32 -#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H) - -#define __RADEON_BLIT_COMMON_H__ -#endif diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index f5e92cfcc140..95590bd07afb 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1852,12 +1852,6 @@ static int rv770_startup(struct radeon_device *rdev) } rv770_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy.copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* allocate wb buffer */ r = radeon_wb_init(rdev); @@ -2092,7 +2086,6 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { - r600_blit_fini(rdev); r700_cp_fini(rdev); r600_dma_fini(rdev); r600_irq_fini(rdev); -- cgit v1.2.3 From 85a129ca8db375ce046faa34eb1387ea4247e268 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 5 Aug 2013 12:41:20 -0400 Subject: drm/radeon: add UVD->DPM helper function (v5) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper function for counting the number of open stream handles. 
v2: fix copy-pasta in comments and whitespace error v3: make function static since it's only used in radeon_uvd.c at the moment v4: make non-static again for future changes v5: make static again for new rework of dpm uvd changes Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_uvd.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 83be8fdceab1..fc8d03be933c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1441,6 +1441,7 @@ struct radeon_uvd { void *saved_bo; atomic_t handles[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; + unsigned img_size[RADEON_MAX_UVD_HANDLES]; struct delayed_work idle_work; }; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index b79f4f5cdd62..2a4cff1acf02 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -147,6 +147,7 @@ int radeon_uvd_init(struct radeon_device *rdev) for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); rdev->uvd.filp[i] = NULL; + rdev->uvd.img_size[i] = 0; } return 0; @@ -347,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { int32_t *msg, msg_type, handle; + unsigned img_size = 0; void *ptr; int i, r; @@ -383,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, if (msg_type == 1) { /* it's a decode msg, calc buffer sizes */ r = radeon_uvd_cs_msg_decode(msg, buf_sizes); + /* calc image size (width * height) */ + img_size = msg[6] * msg[7]; radeon_bo_kunmap(bo); if (r) return r; @@ -394,6 +398,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, radeon_bo_kunmap(bo); return 0; } else { + /* it's a create msg, calc image size (width * height) */ + img_size = msg[7] * msg[8]; radeon_bo_kunmap(bo); if (msg_type != 0) { @@ -414,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { p->rdev->uvd.filp[i] = p->filp; + p->rdev->uvd.img_size[i] = img_size; return 0; } } @@ -733,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, return radeon_uvd_send_msg(rdev, ring, bo, fence); } +/** + * radeon_uvd_count_handles - count number of open streams + * + * @rdev: radeon_device pointer + * @sd: number of SD streams + * @hd: number of HD streams + * + * Count the number of open SD/HD streams as a hint for power mangement + */ +static void radeon_uvd_count_handles(struct radeon_device *rdev, + unsigned *sd, unsigned *hd) +{ + unsigned i; + + *sd = 0; + *hd = 0; + + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (!atomic_read(&rdev->uvd.handles[i])) + continue; + + if (rdev->uvd.img_size[i] >= 720*576) + ++(*hd); + else + ++(*sd); + } +} + static void radeon_uvd_idle_work_handler(struct work_struct *work) { struct radeon_device *rdev = -- cgit v1.2.3 From ce3537d57196dfc7094755532e1ffc1af133ca5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 24 Jul 2013 12:12:49 -0400 Subject: drm/radeon/dpm: use multiple UVD power states (v3) Use the UVD handle information to determine which which power states to select when using UVD. 
For example, decoding a single SD stream requires much lower clocks than multiple HD streams. v2: switch to a cleaner dpm/uvd interface v3: change the uvd power state while streams are active if need be Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 5 ++++- drivers/gpu/drm/radeon/radeon_cs.c | 11 +++++++---- drivers/gpu/drm/radeon/radeon_pm.c | 30 ++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_uvd.c | 23 ++++++++++++++++------- 4 files changed, 57 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index fc8d03be933c..a276f0267433 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1360,11 +1360,14 @@ struct radeon_dpm { struct radeon_dpm_thermal thermal; /* forced levels */ enum radeon_dpm_forced_level forced_level; + /* track UVD streams */ + unsigned sd; + unsigned hd; }; void radeon_dpm_enable_power_state(struct radeon_device *rdev, enum radeon_pm_state_type dpm_state); - +void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable); struct radeon_pm { struct mutex mutex; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 13a130fb3517..5384fa42c16e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, DRM_ERROR("Invalid command stream !\n"); return r; } + + if (parser->ring == R600_RING_TYPE_UVD_INDEX) + radeon_uvd_note_usage(rdev); + radeon_cs_sync_rings(parser); r = radeon_ib_schedule(rdev, &parser->ib, NULL); if (r) { @@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, return r; } + if (parser->ring == R600_RING_TYPE_UVD_INDEX) + radeon_uvd_note_usage(rdev); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); r = radeon_vm_alloc_pt(rdev, vm); @@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; } - /* XXX pick SD/HD/MVC */ - if (parser.ring == R600_RING_TYPE_UVD_INDEX) - radeon_uvd_note_usage(rdev); - r = radeon_cs_ib_chunk(rdev, &parser); if (r) { goto out; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index c557850cd345..59d7a0c86589 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -729,6 +729,8 @@ restart_search: /* use a fallback state if we didn't match */ switch (dpm_state) { case POWER_STATE_TYPE_INTERNAL_UVD_SD: + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + goto restart_search; case POWER_STATE_TYPE_INTERNAL_UVD_HD: case POWER_STATE_TYPE_INTERNAL_UVD_HD2: case POWER_STATE_TYPE_INTERNAL_UVD_MVC: @@ -884,6 +886,34 @@ void radeon_dpm_enable_power_state(struct radeon_device *rdev, radeon_pm_compute_clocks(rdev); } +void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) +{ + enum radeon_pm_state_type dpm_state; + + if (enable) { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.uvd_active = true; + if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; + else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; + else + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; + rdev->pm.dpm.state = dpm_state; + 
mutex_unlock(&rdev->pm.mutex); + } else { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.uvd_active = false; + mutex_unlock(&rdev->pm.mutex); + } + + radeon_pm_compute_clocks(rdev); +} + static void radeon_pm_suspend_old(struct radeon_device *rdev) { mutex_lock(&rdev->pm.mutex); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 2a4cff1acf02..1a01bbff9bfa 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -775,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work) if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) { if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { - mutex_lock(&rdev->pm.mutex); - rdev->pm.dpm.uvd_active = false; - mutex_unlock(&rdev->pm.mutex); - radeon_pm_compute_clocks(rdev); + radeon_dpm_enable_uvd(rdev, false); } else { radeon_set_uvd_clocks(rdev, 0, 0); } @@ -790,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work) void radeon_uvd_note_usage(struct radeon_device *rdev) { + bool streams_changed = false; bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); - if (set_clocks) { + + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + unsigned hd = 0, sd = 0; + radeon_uvd_count_handles(rdev, &sd, &hd); + if ((rdev->pm.dpm.sd != sd) || + (rdev->pm.dpm.hd != hd)) { + rdev->pm.dpm.sd = sd; + rdev->pm.dpm.hd = hd; + streams_changed = true; + } + } + + if (set_clocks || streams_changed) { if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { - /* XXX pick SD/HD/MVC */ - radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD); + radeon_dpm_enable_uvd(rdev, true); } else { radeon_set_uvd_clocks(rdev, 53300, 40000); } -- cgit v1.2.3 From 60320347617c0d97de7dffabcdf617d35cf57b46 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 24 Jul 2013 14:59:48 -0400 Subject: drm/radeon/dpm: rework thermal state handling 1. Handle the the thermal state directly in the work handler. Remove the state selection function since nothing else uses it now. 2. On some asics there is no thermal state, so we just use a regular state and force the low performance state. 
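A minimal standalone C sketch of the flow described above, using stand-in types and field names rather than the actual radeon structures: the thermal work handler now flags thermal_active and selects the state itself, and the low performance level is forced after the state change for as long as the thermal state is active.

/* simplified model only -- not the driver code */
enum dpm_state { DPM_STATE_USER, DPM_STATE_THERMAL };

struct dpm_model {
	int thermal_active;
	int forced_low;
	enum dpm_state state;
	enum dpm_state user_state;
};

/* work handler: select the state and track thermal_active directly */
static void thermal_work(struct dpm_model *m, int over_temp)
{
	enum dpm_state next = over_temp ? DPM_STATE_THERMAL : m->user_state;

	m->thermal_active = (next == DPM_STATE_THERMAL);
	m->state = next;
}

/* after changing the power state: force the low perf level while thermal */
static void post_state_change(struct dpm_model *m)
{
	m->forced_low = m->thermal_active ? 1 : 0;
}
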
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 2 -- drivers/gpu/drm/radeon/radeon_pm.c | 44 +++++++++++++------------------------- 2 files changed, 15 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a276f0267433..034782804846 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1365,8 +1365,6 @@ struct radeon_dpm { unsigned hd; }; -void radeon_dpm_enable_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type dpm_state); void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable); struct radeon_pm { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 59d7a0c86589..b72508bcfc7b 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -624,7 +624,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) /* switch back the user state */ dpm_state = rdev->pm.dpm.user_state; } - radeon_dpm_enable_power_state(rdev, dpm_state); + mutex_lock(&rdev->pm.mutex); + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) + rdev->pm.dpm.thermal_active = true; + else + rdev->pm.dpm.thermal_active = false; + rdev->pm.dpm.state = dpm_state; + mutex_unlock(&rdev->pm.mutex); + + radeon_pm_compute_clocks(rdev); } static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, @@ -852,40 +860,18 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) radeon_dpm_post_set_power_state(rdev); + /* force low perf level for thermal */ + if (rdev->pm.dpm.thermal_active && + rdev->asic->dpm.force_performance_level) { + radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); + } + done: mutex_unlock(&rdev->ring_lock); up_write(&rdev->pm.mclk_lock); mutex_unlock(&rdev->ddev->struct_mutex); } -void radeon_dpm_enable_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type dpm_state) -{ - if (!rdev->pm.dpm_enabled) - return; - - mutex_lock(&rdev->pm.mutex); - switch (dpm_state) { - case POWER_STATE_TYPE_INTERNAL_THERMAL: - rdev->pm.dpm.thermal_active = true; - break; - case POWER_STATE_TYPE_INTERNAL_UVD: - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - rdev->pm.dpm.uvd_active = true; - break; - default: - rdev->pm.dpm.thermal_active = false; - rdev->pm.dpm.uvd_active = false; - break; - } - rdev->pm.dpm.state = dpm_state; - mutex_unlock(&rdev->pm.mutex); - radeon_pm_compute_clocks(rdev); -} - void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) { enum radeon_pm_state_type dpm_state; -- cgit v1.2.3 From edcd26e81f71ffecdda0c28b25f07feeddae42db Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 5 Jul 2013 17:16:51 -0400 Subject: drm/radeon: default to 1024M gart size on rv770+ Newer asics have a lot of vram so it's less of an issue to waste a little more space for the gart page table. This gives us some additional gart space before having to migrate to non-gart system ram for games, etc. where we use up most of vram. 
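In effect the auto default resolves as sketched below; this is a standalone approximation with placeholder names (pick_gart_size_mb, FAMILY_RV770), not the driver's actual symbols: -1 means auto and maps to 1024M on RV770 and newer, 512M on older parts, and requests that are too small or not a power of two fall back to the same per-family default.

/* simplified model only -- not the driver code */
#define FAMILY_RV770 770

static int is_pot(unsigned int v)
{
	return v && !(v & (v - 1));
}

/* gart_size_mb is the module parameter; -1 = auto */
static unsigned int pick_gart_size_mb(int gart_size_mb, int family)
{
	unsigned int def = (family >= FAMILY_RV770) ? 1024 : 512;

	if (gart_size_mb == -1)
		return def;		/* auto: larger default on newer asics */
	if (gart_size_mb < 32 || !is_pot((unsigned int)gart_size_mb))
		return def;		/* too small or not a power of two */
	return (unsigned int)gart_size_mb;
}
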
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_device.c | 22 +++++++++++++++++----- drivers/gpu/drm/radeon/radeon_drv.c | 4 ++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 63398ae1dbf5..e887641ba003 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev) radeon_vram_limit = 0; } + if (radeon_gart_size == -1) { + /* default to a larger gart size on newer asics */ + if (rdev->family >= CHIP_RV770) + radeon_gart_size = 1024; + else + radeon_gart_size = 512; + } /* gtt size must be power of two and greater or equal to 32M */ if (radeon_gart_size < 32) { - dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", + dev_warn(rdev->dev, "gart size (%d) too small\n", radeon_gart_size); - radeon_gart_size = 512; - + if (rdev->family >= CHIP_RV770) + radeon_gart_size = 1024; + else + radeon_gart_size = 512; } else if (!radeon_check_pot_argument(radeon_gart_size)) { dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", radeon_gart_size); - radeon_gart_size = 512; + if (rdev->family >= CHIP_RV770) + radeon_gart_size = 1024; + else + radeon_gart_size = 512; } rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; @@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev, rdev->family = flags & RADEON_FAMILY_MASK; rdev->is_atom_bios = false; rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + rdev->mc.gtt_size = 512 * 1024 * 1024; rdev->accel_working = false; /* set up ring ids */ for (i = 0; i < RADEON_NUM_RINGS; i++) { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 29876b1be8ec..62bd176a7289 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -154,7 +154,7 @@ int radeon_dynclks = -1; int radeon_r4xx_atom = 0; int radeon_agpmode = 0; int radeon_vram_limit = 0; -int radeon_gart_size = 512; /* default gart size */ +int radeon_gart_size = -1; /* auto */ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; @@ -187,7 +187,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600); MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)"); module_param_named(agpmode, radeon_agpmode, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)"); +MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); module_param_named(gartsize, radeon_gart_size, int, 0600); MODULE_PARM_DESC(benchmark, "Run benchmark"); -- cgit v1.2.3 From d4d3278c656cc179d3f334a7b49e88a6964bee68 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Jun 2013 17:55:39 -0400 Subject: drm/radeon/dpm: use performance state if no UVD state Newer asics don't have specific UVD states. 
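The resulting fallback chain can be modelled roughly as below; the names here (pick_uvd_ps and friends) are illustrative stand-ins, not the real radeon_dpm_pick_power_state logic, which first searches the state table and only then falls back: an SD request falls back to HD, and if the board has no dedicated UVD state at all the performance state is used instead.

/* simplified model only -- not the driver code */
enum ps_type { PS_UVD_SD, PS_UVD_HD, PS_PERFORMANCE };

struct ps;	/* opaque power state */

static const struct ps *pick_uvd_ps(enum ps_type type,
				    const struct ps *uvd_ps,
				    const struct ps *perf_ps)
{
restart:
	switch (type) {
	case PS_UVD_SD:
		type = PS_UVD_HD;	/* no SD state: try HD */
		goto restart;
	case PS_UVD_HD:
		if (uvd_ps)
			return uvd_ps;	/* board has a dedicated UVD state */
		type = PS_PERFORMANCE;	/* newer asics: no UVD state, use perf */
		goto restart;
	case PS_PERFORMANCE:
	default:
		return perf_ps;
	}
}
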
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_pm.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index b72508bcfc7b..79a03de4ac0a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -695,7 +695,10 @@ restart_search: break; /* internal states */ case POWER_STATE_TYPE_INTERNAL_UVD: - return rdev->pm.dpm.uvd_ps; + if (rdev->pm.dpm.uvd_ps) + return rdev->pm.dpm.uvd_ps; + else + break; case POWER_STATE_TYPE_INTERNAL_UVD_SD: if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) return ps; @@ -742,7 +745,12 @@ restart_search: case POWER_STATE_TYPE_INTERNAL_UVD_HD: case POWER_STATE_TYPE_INTERNAL_UVD_HD2: case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - return rdev->pm.dpm.uvd_ps; + if (rdev->pm.dpm.uvd_ps) { + return rdev->pm.dpm.uvd_ps; + } else { + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + goto restart_search; + } case POWER_STATE_TYPE_INTERNAL_THERMAL: dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; goto restart_search; -- cgit v1.2.3 From 58ea2deab36ecf0b416d3486442cc6df693dcc79 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 Jan 2013 10:03:39 -0500 Subject: drm/radeon/kms: fix up dce8 display watermark calc for dpm Calculate the low and high watermarks based on the low and high clocks for the current power state. The dynamic pm hw will select the appropriate watermark based on the internal dpm state. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 96 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 74 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 8928bd109c16..42b143e75d42 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6826,7 +6826,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev, u32 lb_size, u32 num_heads) { struct drm_display_mode *mode = &radeon_crtc->base.mode; - struct dce8_wm_params wm; + struct dce8_wm_params wm_low, wm_high; u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; @@ -6836,35 +6836,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev, pixel_period = 1000000 / (u32)mode->clock; line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); - wm.yclk = rdev->pm.current_mclk * 10; - wm.sclk = rdev->pm.current_sclk * 10; - wm.disp_clk = mode->clock; - wm.src_width = mode->crtc_hdisplay; - wm.active_time = mode->crtc_hdisplay * pixel_period; - wm.blank_time = line_time - wm.active_time; - wm.interlaced = false; + /* watermark for high clocks */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && + rdev->pm.dpm_enabled) { + wm_high.yclk = + radeon_dpm_get_mclk(rdev, false) * 10; + wm_high.sclk = + radeon_dpm_get_sclk(rdev, false) * 10; + } else { + wm_high.yclk = rdev->pm.current_mclk * 10; + wm_high.sclk = rdev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm.interlaced = true; - wm.vsc = radeon_crtc->vsc; - wm.vtaps = 1; + wm_high.interlaced = true; + wm_high.vsc = radeon_crtc->vsc; + wm_high.vtaps = 1; if (radeon_crtc->rmx_type != RMX_OFF) - wm.vtaps = 2; - wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm.lb_size = lb_size; - wm.dram_channels = 
cik_get_number_of_dram_channels(rdev); - wm.num_heads = num_heads; + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = cik_get_number_of_dram_channels(rdev); + wm_high.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535); + latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce8_check_latency_hiding(&wm_high) || + (rdev->disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + + /* watermark for low clocks */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && + rdev->pm.dpm_enabled) { + wm_low.yclk = + radeon_dpm_get_mclk(rdev, true) * 10; + wm_low.sclk = + radeon_dpm_get_sclk(rdev, true) * 10; + } else { + wm_low.yclk = rdev->pm.current_mclk * 10; + wm_low.sclk = rdev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = radeon_crtc->vsc; + wm_low.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = cik_get_number_of_dram_channels(rdev); + wm_low.num_heads = num_heads; + /* set for low clocks */ - /* wm.yclk = low clk; wm.sclk = low clk */ - latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535); + latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ - if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || - !dce8_average_bandwidth_vs_available_bandwidth(&wm) || - !dce8_check_latency_hiding(&wm) || + if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce8_check_latency_hiding(&wm_low) || (rdev->disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } @@ -6889,6 +6936,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev, LATENCY_HIGH_WATERMARK(line_time))); /* restore original selection */ WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask); + + /* save values for DPM */ + radeon_crtc->line_time = line_time; + radeon_crtc->wm_high = latency_watermark_a; + radeon_crtc->wm_low = latency_watermark_b; } /** -- cgit v1.2.3 From 62c35fd7d2e6d383301f029e63f6b7d17ea82637 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Feb 2013 18:15:06 -0500 Subject: drm/radeon/cik: implement some more atom helpers for DPM Required for DPM on CIK. 
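In rough outline, the vddc/vddci helper added below performs a two-level table lookup: match the virtual voltage id against the id array from the ATOM profiling table, then take the voltage level of the first leakage bin that covers the board's leakage id. A standalone sketch of that lookup, with made-up arrays, sizes, and names:

/* Illustration only: the array layout mirrors the bin-major indexing
 * (level_buf[bin * num_ids + id]) used by the real helper; the data is
 * invented. */
#include <stdint.h>
#include <stdio.h>

static uint16_t lookup_leakage_voltage(const uint16_t *id_buf,
				       const uint16_t *level_buf,
				       const uint16_t *leakage_bin,
				       int num_ids, int num_bins,
				       uint16_t virtual_voltage_id,
				       uint16_t vbios_leakage_id)
{
	int i, j;

	for (i = 0; i < num_ids; i++) {
		if (id_buf[i] != virtual_voltage_id)
			continue;
		for (j = 0; j < num_bins; j++) {
			/* first bin that covers this leakage id wins */
			if (vbios_leakage_id <= leakage_bin[j])
				return level_buf[j * num_ids + i];
		}
		break;
	}
	return 0;	/* not found */
}

int main(void)
{
	const uint16_t ids[]    = { 0xff01, 0xff02 };
	const uint16_t bins[]   = { 10, 20, 30 };
	/* levels laid out bin-major, num_ids entries per bin (mV) */
	const uint16_t levels[] = { 900, 950, 1000, 1050, 1100, 1150 };

	printf("%u mV\n", (unsigned)lookup_leakage_voltage(ids, levels, bins,
							   2, 3, 0xff02, 15));
	return 0;
}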
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_atombios.c | 115 +++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 4ccd61f60eb6..27de73c162c9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -3077,6 +3077,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); } +int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev, + u16 *leakage_id) +{ + union set_voltage args; + int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); + u8 frev, crev; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (crev) { + case 3: + case 4: + args.v3.ucVoltageType = 0; + args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID; + args.v3.usVoltageLevel = 0; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *leakage_id = le16_to_cpu(args.v3.usVoltageLevel); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + + return 0; +} + +int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev, + u16 *vddc, u16 *vddci, + u16 virtual_voltage_id, + u16 vbios_voltage_id) +{ + int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); + u8 frev, crev; + u16 data_offset, size; + int i, j; + ATOM_ASIC_PROFILING_INFO_V2_1 *profile; + u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; + + *vddc = 0; + *vddci = 0; + + if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) + return -EINVAL; + + profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (frev) { + case 1: + return -EINVAL; + case 2: + switch (crev) { + case 1: + if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1)) + return -EINVAL; + leakage_bin = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usLeakageBinArrayOffset)); + vddc_id_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDC_IdArrayOffset)); + vddc_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDC_LevelArrayOffset)); + vddci_id_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDCI_IdArrayOffset)); + vddci_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset)); + + if (profile->ucElbVDDC_Num > 0) { + for (i = 0; i < profile->ucElbVDDC_Num; i++) { + if (vddc_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (vbios_voltage_id <= leakage_bin[j]) { + *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + if (profile->ucElbVDDCI_Num > 0) { + for (i = 0; i < profile->ucElbVDDCI_Num; i++) { + if (vddci_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (vbios_voltage_id <= leakage_bin[j]) { + *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; + break; + } + } + break; + } + } + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + 
} + + return 0; +} + int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type, u32 *gpio_value, u32 *gpio_mask) -- cgit v1.2.3 From 8c68e3938863460b6c224a3871d5d1ac8bb4b09f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Jun 2013 15:38:37 -0400 Subject: drm/radeon: switch CIK to use radeon_ucode.h Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 17 +---------------- drivers/gpu/drm/radeon/radeon_ucode.h | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 42b143e75d42..2ceb9003206c 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -30,22 +30,7 @@ #include "cikd.h" #include "atom.h" #include "cik_blit_shaders.h" - -/* GFX */ -#define CIK_PFP_UCODE_SIZE 2144 -#define CIK_ME_UCODE_SIZE 2144 -#define CIK_CE_UCODE_SIZE 2144 -/* compute */ -#define CIK_MEC_UCODE_SIZE 4192 -/* interrupts */ -#define BONAIRE_RLC_UCODE_SIZE 2048 -#define KB_RLC_UCODE_SIZE 2560 -#define KV_RLC_UCODE_SIZE 2560 -/* gddr controller */ -#define CIK_MC_UCODE_SIZE 7866 -/* sdma */ -#define CIK_SDMA_UCODE_SIZE 1050 -#define CIK_SDMA_UCODE_VERSION 64 +#include "radeon_ucode.h" MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index d8b05f7bcf1a..fad27c051bbf 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -35,6 +35,12 @@ #define SI_PFP_UCODE_SIZE 2144 #define SI_PM4_UCODE_SIZE 2144 #define SI_CE_UCODE_SIZE 2144 +#define CIK_PFP_UCODE_SIZE 2144 +#define CIK_ME_UCODE_SIZE 2144 +#define CIK_CE_UCODE_SIZE 2144 + +/* MEC */ +#define CIK_MEC_UCODE_SIZE 4192 /* RLC */ #define R600_RLC_UCODE_SIZE 768 @@ -43,12 +49,20 @@ #define CAYMAN_RLC_UCODE_SIZE 1024 #define ARUBA_RLC_UCODE_SIZE 1536 #define SI_RLC_UCODE_SIZE 2048 +#define BONAIRE_RLC_UCODE_SIZE 2048 +#define KB_RLC_UCODE_SIZE 2560 +#define KV_RLC_UCODE_SIZE 2560 /* MC */ #define BTC_MC_UCODE_SIZE 6024 #define CAYMAN_MC_UCODE_SIZE 6037 #define SI_MC_UCODE_SIZE 7769 #define OLAND_MC_UCODE_SIZE 7863 +#define CIK_MC_UCODE_SIZE 7866 + +/* SDMA */ +#define CIK_SDMA_UCODE_SIZE 1050 +#define CIK_SDMA_UCODE_VERSION 64 /* SMC */ #define RV770_SMC_UCODE_START 0x0100 -- cgit v1.2.3 From 8a7cd27679d0451c7cf072af70acce51d15c446d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Aug 2013 11:29:39 -0400 Subject: drm/radeon/cik: add support for pcie gen1/2/3 switching Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 161 ++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/cikd.h | 57 +++++++++++++++ 2 files changed, 218 insertions(+) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 2ceb9003206c..a30fb32c2301 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -61,6 +61,7 @@ extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *m extern void si_rlc_fini(struct radeon_device *rdev); extern int si_rlc_init(struct radeon_device *rdev); static void cik_rlc_stop(struct radeon_device *rdev); +static void cik_pcie_gen3_enable(struct radeon_device *rdev); /* * Indirect registers accessor @@ -5949,6 +5950,9 @@ static int cik_startup(struct radeon_device *rdev) struct radeon_ring *ring; int r; + /* enable pcie gen2/3 link */ + cik_pcie_gen3_enable(rdev); + cik_mc_program(rdev); if (rdev->flags & RADEON_IS_IGP) { @@ 
-7051,3 +7055,160 @@ int cik_uvd_resume(struct radeon_device *rdev) return 0; } + +static void cik_pcie_gen3_enable(struct radeon_device *rdev) +{ + struct pci_dev *root = rdev->pdev->bus->self; + int bridge_pos, gpu_pos; + u32 speed_cntl, mask, current_data_rate; + int ret, i; + u16 tmp16; + + if (radeon_pcie_gen2 == 0) + return; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + return; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> + LC_CURRENT_DATA_RATE_SHIFT; + if (mask & DRM_PCIE_SPEED_80) { + if (current_data_rate == 2) { + DRM_INFO("PCIE gen 3 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n"); + } else if (mask & DRM_PCIE_SPEED_50) { + if (current_data_rate == 1) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); + } + + bridge_pos = pci_pcie_cap(root); + if (!bridge_pos) + return; + + gpu_pos = pci_pcie_cap(rdev->pdev); + if (!gpu_pos) + return; + + if (mask & DRM_PCIE_SPEED_80) { + /* re-try equalization if gen3 is not already enabled */ + if (current_data_rate != 2) { + u16 bridge_cfg, gpu_cfg; + u16 bridge_cfg2, gpu_cfg2; + u32 max_lw, current_lw, tmp; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1); + max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; + current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT; + + if (current_lw < max_lw) { + tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + if (tmp & LC_RENEGOTIATION_SUPPORT) { + tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS); + tmp |= (max_lw << LC_LINK_WIDTH_SHIFT); + tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW; + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp); + } + } + + for (i = 0; i < 10; i++) { + /* check status */ + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + if (tmp16 & PCI_EXP_DEVSTA_TRPND) + break; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp |= LC_SET_QUIESCE; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp |= LC_REDO_EQ; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + + mdelay(100); + + /* linkctl */ + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); + 
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + /* linkctl2 */ + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); + + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp &= ~LC_SET_QUIESCE; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + } + } + } + + /* set the link speed */ + speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE; + speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~0xf; + if (mask & DRM_PCIE_SPEED_80) + tmp16 |= 3; /* gen3 */ + else if (mask & DRM_PCIE_SPEED_50) + tmp16 |= 2; /* gen2 */ + else + tmp16 |= 1; /* gen1 */ + pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + + for (i = 0; i < rdev->usec_timeout; i++) { + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0) + break; + udelay(1); + } +} diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 7e9275eaef80..a1f376e6dd31 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -35,6 +35,63 @@ #define CG_CLKPIN_CNTL 0xC05001A0 # define XTALIN_DIVIDE (1 << 1) +/* PCIE registers idx/data 0x38/0x3c */ +#define PCIE_LC_STATUS1 0x1400028 /* PCIE */ +# define LC_REVERSE_RCVR (1 << 0) +# define LC_REVERSE_XMIT (1 << 1) +# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2) +# define LC_OPERATING_LINK_WIDTH_SHIFT 2 +# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5) +# define LC_DETECTED_LINK_WIDTH_SHIFT 5 + +#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */ +# define LC_LINK_WIDTH_SHIFT 0 +# define LC_LINK_WIDTH_MASK 0x7 +# define LC_LINK_WIDTH_X0 0 +# define LC_LINK_WIDTH_X1 1 +# define LC_LINK_WIDTH_X2 2 +# define LC_LINK_WIDTH_X4 3 +# define LC_LINK_WIDTH_X8 4 +# define LC_LINK_WIDTH_X16 6 +# define LC_LINK_WIDTH_RD_SHIFT 4 +# define LC_LINK_WIDTH_RD_MASK 0x70 +# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) +# define LC_RECONFIG_NOW (1 << 8) +# define LC_RENEGOTIATION_SUPPORT (1 << 9) +# define LC_RENEGOTIATE_EN (1 << 10) +# define LC_SHORT_RECONFIG_EN (1 << 11) +# define LC_UPCONFIGURE_SUPPORT (1 << 12) +# define LC_UPCONFIGURE_DIS (1 << 13) +# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21) +# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21) +# define LC_DYN_LANES_PWR_STATE_SHIFT 21 + +#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */ +# define LC_GEN2_EN_STRAP (1 << 0) +# define LC_GEN3_EN_STRAP (1 << 1) +# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2) +# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3) +# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3 +# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5) +# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6) +# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7) +# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8) +# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10 +# 
define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */ +# define LC_CURRENT_DATA_RATE_SHIFT 13 +# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16) +# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18) +# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19) +# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20) +# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21) + +#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */ +# define LC_REDO_EQ (1 << 5) +# define LC_SET_QUIESCE (1 << 13) + +/* direct registers */ #define PCIE_INDEX 0x38 #define PCIE_DATA 0x3C -- cgit v1.2.3 From 7235711a43b6839f5759327d003fa334c4a703f2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 4 Apr 2013 13:58:09 -0400 Subject: drm/radeon: add support for ASPM on CIK asics Enables PCIE ASPM (Active State Power Management) on CIK asics. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 151 ++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/cikd.h | 72 +++++++++++++++++++- 2 files changed, 222 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a30fb32c2301..0da9d6715ef5 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -62,6 +62,7 @@ extern void si_rlc_fini(struct radeon_device *rdev); extern int si_rlc_init(struct radeon_device *rdev); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); +static void cik_program_aspm(struct radeon_device *rdev); /* * Indirect registers accessor @@ -5952,6 +5953,8 @@ static int cik_startup(struct radeon_device *rdev) /* enable pcie gen2/3 link */ cik_pcie_gen3_enable(rdev); + /* enable aspm */ + cik_program_aspm(rdev); cik_mc_program(rdev); @@ -7212,3 +7215,151 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) udelay(1); } } + +static void cik_program_aspm(struct radeon_device *rdev) +{ + u32 data, orig; + bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; + bool disable_clkreq = false; + + if (radeon_aspm == 0) + return; + + /* XXX double check IGPs */ + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); + data &= ~LC_XMIT_N_FTS_MASK; + data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data); + + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3); + data |= LC_GO_TO_RECOVERY; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL3, data); + + orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL); + data |= P_IGNORE_EDB_ERR; + if (orig != data) + WREG32_PCIE_PORT(PCIE_P_CNTL, data); + + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL); + data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK); + data |= LC_PMI_TO_L1_DIS; + if (!disable_l0s) + data |= LC_L0S_INACTIVITY(7); + + if (!disable_l1) { + data |= LC_L1_INACTIVITY(7); + data &= ~LC_PMI_TO_L1_DIS; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + + if (!disable_plloff_in_l1) { + bool clk_req_support; + + orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0); + data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7); + if (orig != data) + WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data); + + orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1); + data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7); + if (orig != data) + 
WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data); + + orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0); + data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7); + if (orig != data) + WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data); + + orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1); + data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7); + if (orig != data) + WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data); + + orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + data &= ~LC_DYN_LANES_PWR_STATE_MASK; + data |= LC_DYN_LANES_PWR_STATE(3); + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); + + if (!disable_clkreq) { + struct pci_dev *root = rdev->pdev->bus->self; + u32 lnkcap; + + clk_req_support = false; + pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap & PCI_EXP_LNKCAP_CLKPM) + clk_req_support = true; + } else { + clk_req_support = false; + } + + if (clk_req_support) { + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2); + data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL2, data); + + orig = data = RREG32_SMC(THM_CLK_CNTL); + data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK); + data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1); + if (orig != data) + WREG32_SMC(THM_CLK_CNTL, data); + + orig = data = RREG32_SMC(MISC_CLK_CTRL); + data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK); + data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1); + if (orig != data) + WREG32_SMC(MISC_CLK_CTRL, data); + + orig = data = RREG32_SMC(CG_CLKPIN_CNTL); + data &= ~BCLK_AS_XCLK; + if (orig != data) + WREG32_SMC(CG_CLKPIN_CNTL, data); + + orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2); + data &= ~FORCE_BIF_REFCLK_EN; + if (orig != data) + WREG32_SMC(CG_CLKPIN_CNTL_2, data); + + orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL); + data &= ~MPLL_CLKOUT_SEL_MASK; + data |= MPLL_CLKOUT_SEL(4); + if (orig != data) + WREG32_SMC(MPLL_BYPASSCLK_SEL, data); + } + } + } else { + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + } + + orig = data = RREG32_PCIE_PORT(PCIE_CNTL2); + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN; + if (orig != data) + WREG32_PCIE_PORT(PCIE_CNTL2, data); + + if (!disable_l0s) { + data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); + if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) { + data = RREG32_PCIE_PORT(PCIE_LC_STATUS1); + if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) { + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL); + data &= ~LC_L0S_INACTIVITY_MASK; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + } + } + } +} diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index a1f376e6dd31..d50593fd9cd5 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -32,10 +32,53 @@ #define GENERAL_PWRMGT 0xC0200000 # define GPU_COUNTER_CLK (1 << 15) +#define MPLL_BYPASSCLK_SEL 0xC050019C +# define MPLL_CLKOUT_SEL(x) ((x) << 8) +# define MPLL_CLKOUT_SEL_MASK 0xFF00 #define CG_CLKPIN_CNTL 0xC05001A0 # define XTALIN_DIVIDE (1 << 1) +# define BCLK_AS_XCLK (1 << 2) +#define CG_CLKPIN_CNTL_2 0xC05001A4 +# define FORCE_BIF_REFCLK_EN (1 << 3) +# define MUX_TCLK_TO_XCLK (1 << 8) +#define THM_CLK_CNTL 0xC05001A8 +# define CMON_CLK_SEL(x) ((x) << 0) +# define CMON_CLK_SEL_MASK 0xFF +# define TMON_CLK_SEL(x) ((x) << 8) +# define TMON_CLK_SEL_MASK 0xFF00 +#define MISC_CLK_CTRL 0xC05001AC +# define 
DEEP_SLEEP_CLK_SEL(x) ((x) << 0) +# define DEEP_SLEEP_CLK_SEL_MASK 0xFF +# define ZCLK_SEL(x) ((x) << 8) +# define ZCLK_SEL_MASK 0xFF00 /* PCIE registers idx/data 0x38/0x3c */ +#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */ +# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7) +# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7) +# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7 +# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10) +# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10) +# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10 +# define PLL_RAMP_UP_TIME_0(x) ((x) << 24) +# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24) +# define PLL_RAMP_UP_TIME_0_SHIFT 24 +#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */ +# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7) +# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7) +# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7 +# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10) +# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10) +# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10 +# define PLL_RAMP_UP_TIME_1(x) ((x) << 24) +# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24) +# define PLL_RAMP_UP_TIME_1_SHIFT 24 + +#define PCIE_CNTL2 0x1001001c /* PCIE */ +# define SLV_MEM_LS_EN (1 << 16) +# define MST_MEM_LS_EN (1 << 18) +# define REPLAY_MEM_LS_EN (1 << 19) + #define PCIE_LC_STATUS1 0x1400028 /* PCIE */ # define LC_REVERSE_RCVR (1 << 0) # define LC_REVERSE_XMIT (1 << 1) @@ -44,6 +87,22 @@ # define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5) # define LC_DETECTED_LINK_WIDTH_SHIFT 5 +#define PCIE_P_CNTL 0x1400040 /* PCIE */ +# define P_IGNORE_EDB_ERR (1 << 6) + +#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */ +#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */ + +#define PCIE_LC_CNTL 0x100100A0 /* PCIE */ +# define LC_L0S_INACTIVITY(x) ((x) << 8) +# define LC_L0S_INACTIVITY_MASK (0xf << 8) +# define LC_L0S_INACTIVITY_SHIFT 8 +# define LC_L1_INACTIVITY(x) ((x) << 12) +# define LC_L1_INACTIVITY_MASK (0xf << 12) +# define LC_L1_INACTIVITY_SHIFT 12 +# define LC_PMI_TO_L1_DIS (1 << 16) +# define LC_ASPM_TO_L1_DIS (1 << 24) + #define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */ # define LC_LINK_WIDTH_SHIFT 0 # define LC_LINK_WIDTH_MASK 0x7 @@ -65,7 +124,12 @@ # define LC_DYN_LANES_PWR_STATE(x) ((x) << 21) # define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21) # define LC_DYN_LANES_PWR_STATE_SHIFT 21 - +#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */ +# define LC_XMIT_N_FTS(x) ((x) << 0) +# define LC_XMIT_N_FTS_MASK (0xff << 0) +# define LC_XMIT_N_FTS_SHIFT 0 +# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8) +# define LC_N_FTS_MASK (0xff << 24) #define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */ # define LC_GEN2_EN_STRAP (1 << 0) # define LC_GEN3_EN_STRAP (1 << 1) @@ -87,6 +151,12 @@ # define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20) # define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21) +#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */ +# define LC_ALLOW_PDWN_IN_L1 (1 << 17) +# define LC_ALLOW_PDWN_IN_L23 (1 << 18) + +#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */ +# define LC_GO_TO_RECOVERY (1 << 30) #define PCIE_LC_CNTL4 0x100100B6 /* PCIE */ # define LC_REDO_EQ (1 << 5) # define LC_SET_QUIESCE (1 << 13) -- cgit v1.2.3 From 866d83de0c9cc36a598252282bdedc158f50dcc2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 15 Apr 2013 17:13:29 -0400 Subject: drm/radeon/cik: restructure rlc setup Restructure rlc setup to handle clock and power gating. 
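The shape of the refactor, reduced to a standalone toy (the register variable and bit name here are invented stand-ins for CP_INT_CNTL_RING0 and its enable bits): the interrupt toggle becomes a small bool-taking helper that both the stop path and the later clock/power gating code can call.

/* Illustration only: not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t int_cntl_reg;			/* stand-in register */
#define GUI_IDLE_INT_ENABLE	(1u << 0)	/* invented bit for the example */

static void enable_gui_idle_interrupt(bool enable)
{
	uint32_t tmp = int_cntl_reg;

	if (enable)
		tmp |= GUI_IDLE_INT_ENABLE;
	else
		tmp &= ~GUI_IDLE_INT_ENABLE;
	int_cntl_reg = tmp;
}

static void rlc_stop(void)
{
	enable_gui_idle_interrupt(false);
	/* ...halt the micro engine and wait for the serdes to go idle... */
}

static void rlc_start(void)
{
	/* ...re-enable the micro engine... */
	enable_gui_idle_interrupt(true);
}

int main(void)
{
	rlc_stop();
	rlc_start();
	printf("int_cntl = 0x%08x\n", (unsigned)int_cntl_reg);
	return 0;
}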
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 84 +++++++++++++++++++++++++++---------------- drivers/gpu/drm/radeon/cikd.h | 1 + drivers/gpu/drm/radeon/si.c | 2 +- 3 files changed, 56 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0da9d6715ef5..e92a9721ca25 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -60,6 +60,7 @@ extern bool evergreen_is_display_hung(struct radeon_device *rdev); extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void si_rlc_fini(struct radeon_device *rdev); extern int si_rlc_init(struct radeon_device *rdev); +extern void si_rlc_reset(struct radeon_device *rdev); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); @@ -4728,31 +4729,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm * variety of functions, the most important of which is * the interrupt controller. */ -/** - * cik_rlc_stop - stop the RLC ME - * - * @rdev: radeon_device pointer - * - * Halt the RLC ME (MicroEngine) (CIK). - */ -static void cik_rlc_stop(struct radeon_device *rdev) +static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable) { - int i, j, k; - u32 mask, tmp; + u32 tmp = RREG32(CP_INT_CNTL_RING0); - tmp = RREG32(CP_INT_CNTL_RING0); - tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + if (enable) + tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + else + tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); WREG32(CP_INT_CNTL_RING0, tmp); +} - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); +static void cik_enable_lbpw(struct radeon_device *rdev, bool enable) +{ + u32 tmp; - tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; - WREG32(RLC_CGCG_CGLS_CTRL, tmp); + tmp = RREG32(RLC_LB_CNTL); + if (enable) + tmp |= LOAD_BALANCE_ENABLE; + else + tmp &= ~LOAD_BALANCE_ENABLE; + WREG32(RLC_LB_CNTL, tmp); +} - WREG32(RLC_CNTL, 0); +static void cik_wait_for_rlc_serdes(struct radeon_device *rdev) +{ + u32 i, j, k; + u32 mask; for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { @@ -4774,6 +4778,32 @@ static void cik_rlc_stop(struct radeon_device *rdev) } } +/** + * cik_rlc_stop - stop the RLC ME + * + * @rdev: radeon_device pointer + * + * Halt the RLC ME (MicroEngine) (CIK). 
+ */ +static void cik_rlc_stop(struct radeon_device *rdev) +{ + u32 tmp; + + cik_enable_gui_idle_interrupt(rdev, false); + + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; + WREG32(RLC_CGCG_CGLS_CTRL, tmp); + + WREG32(RLC_CNTL, 0); + + cik_wait_for_rlc_serdes(rdev); +} + /** * cik_rlc_start - start the RLC ME * @@ -4783,13 +4813,9 @@ static void cik_rlc_stop(struct radeon_device *rdev) */ static void cik_rlc_start(struct radeon_device *rdev) { - u32 tmp; - WREG32(RLC_CNTL, RLC_ENABLE); - tmp = RREG32(CP_INT_CNTL_RING0); - tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); - WREG32(CP_INT_CNTL_RING0, tmp); + cik_enable_gui_idle_interrupt(rdev, true); udelay(50); } @@ -4827,12 +4853,7 @@ static int cik_rlc_resume(struct radeon_device *rdev) cik_rlc_stop(rdev); - WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC); - RREG32(GRBM_SOFT_RESET); - udelay(50); - WREG32(GRBM_SOFT_RESET, 0); - RREG32(GRBM_SOFT_RESET); - udelay(50); + si_rlc_reset(rdev); WREG32(RLC_LB_CNTR_INIT, 0); WREG32(RLC_LB_CNTR_MAX, 0x00008000); @@ -4851,6 +4872,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); WREG32(RLC_GPM_UCODE_ADDR, 0); + /* XXX - find out what chips support lbpw */ + cik_enable_lbpw(rdev, false); + /* XXX */ clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr); clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index d50593fd9cd5..4742f3db4aa6 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -905,6 +905,7 @@ #define RLC_LB_CNTR_MAX 0xC348 #define RLC_LB_CNTL 0xC364 +# define LOAD_BALANCE_ENABLE (1 << 0) #define RLC_LB_CNTR_INIT 0xC36C diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index daa8d2df8ec5..610adfc86bea 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5435,7 +5435,7 @@ int si_rlc_init(struct radeon_device *rdev) return 0; } -static void si_rlc_reset(struct radeon_device *rdev) +void si_rlc_reset(struct radeon_device *rdev) { u32 tmp = RREG32(GRBM_SOFT_RESET); -- cgit v1.2.3 From 10b7ca7e09aa0d5f90265f130aee2b9270bfaadc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 Apr 2013 17:22:05 -0400 Subject: drm/radeon: clean up sumo_rlc_init() for code sharing This will eventually be shared with newer asics to reduce code duplication. 
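The save/restore buffer format written by sumo_rlc_init() for the pre-SI path (dw0 = two packed register offsets, dw1/dw2 = hardware save space) boils down to a short packing loop; a standalone sketch with a small made-up register list and a placeholder end-of-list value:

/* Illustration only: mirrors the packing loop below; the register list
 * values are borrowed from the sumo table, the end marker is a
 * placeholder for this sketch. */
#include <stdint.h>
#include <stdio.h>

#define LIST_END_MARKER	0x00000000	/* placeholder for the example */

static int pack_save_restore(const uint32_t *src, int dws, uint32_t *dst)
{
	uint32_t data;
	int i, j = 0;

	for (i = 0; i < dws; i++) {
		data = src[i] >> 2;
		i++;
		if (i < dws)
			data |= (src[i] >> 2) << 16;
		j = (((i - 1) * 3) / 2);
		dst[j] = data;		/* dw0: packed register pair */
					/* dw1, dw2: hw save space   */
	}
	j = ((i * 3) / 2);
	dst[j] = LIST_END_MARKER;
	return j + 1;			/* dwords written */
}

int main(void)
{
	const uint32_t regs[] = { 0x98fc, 0x9830, 0x9834, 0x9838 };
	uint32_t buf[16] = { 0 };
	int n = pack_save_restore(regs, 4, buf), i;

	for (i = 0; i < n; i++)
		printf("dw%d: 0x%08x\n", i, (unsigned)buf[i]);
	return 0;
}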
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 221 +++++++++++++++++++------------------ 1 file changed, 113 insertions(+), 108 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a5ab5693eb2a..617840829114 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3910,131 +3910,136 @@ int sumo_rlc_init(struct radeon_device *rdev) dws = rdev->rlc.reg_list_size; cs_data = rdev->rlc.cs_data; - /* save restore block */ - if (rdev->rlc.save_restore_obj == NULL) { - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); + if (src_ptr) { + /* save restore block */ + if (rdev->rlc.save_restore_obj == NULL) { + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); + return r; + } + } + + r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); + if (unlikely(r != 0)) { + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.save_restore_gpu_addr); if (r) { - dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); + radeon_bo_unreserve(rdev->rlc.save_restore_obj); + dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); + sumo_rlc_fini(rdev); return r; } - } - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - sumo_rlc_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.save_restore_gpu_addr); - if (r) { + r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + /* write the sr buffer */ + dst_ptr = rdev->rlc.sr_ptr; + /* format: + * dw0: (reg2 << 16) | reg1 + * dw1: reg1 save space + * dw2: reg2 save space + */ + for (i = 0; i < dws; i++) { + data = src_ptr[i] >> 2; + i++; + if (i < dws) + data |= (src_ptr[i] >> 2) << 16; + j = (((i - 1) * 3) / 2); + dst_ptr[j] = data; + } + j = ((i * 3) / 2); + dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; + + radeon_bo_kunmap(rdev->rlc.save_restore_obj); radeon_bo_unreserve(rdev->rlc.save_restore_obj); - dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); - sumo_rlc_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); - sumo_rlc_fini(rdev); - return r; - } - /* write the sr buffer */ - dst_ptr = rdev->rlc.sr_ptr; - /* format: - * dw0: (reg2 << 16) | reg1 - * dw1: reg1 save space - * dw2: reg2 save space - */ - for (i = 0; i < dws; i++) { - data = src_ptr[i] >> 2; - i++; - if (i < dws) - data |= (src_ptr[i] >> 2) << 16; - j = (((i - 1) * 3) / 2); - dst_ptr[j] = data; } - j = ((i * 3) / 2); - dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; - - radeon_bo_kunmap(rdev->rlc.save_restore_obj); - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - /* clear state block */ - reg_list_num = 0; - dws = 0; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_list_num++; - dws += cs_data[i].section[j].reg_count; + if (cs_data) { + /* clear state block */ + reg_list_num = 0; + dws = 0; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_list_num++; + 
dws += cs_data[i].section[j].reg_count; + } } - } - reg_list_blk_index = (3 * reg_list_num + 2); - dws += reg_list_blk_index; + reg_list_blk_index = (3 * reg_list_num + 2); + dws += reg_list_blk_index; - if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); + if (rdev->rlc.clear_state_obj == NULL) { + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + } + r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); + if (unlikely(r != 0)) { + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.clear_state_gpu_addr); if (r) { - dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); + radeon_bo_unreserve(rdev->rlc.clear_state_obj); + dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); sumo_rlc_fini(rdev); return r; } - } - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - sumo_rlc_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.clear_state_gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); - sumo_rlc_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); - sumo_rlc_fini(rdev); - return r; - } - /* set up the cs buffer */ - dst_ptr = rdev->rlc.cs_ptr; - reg_list_hdr_blk_index = 0; - reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); - data = upper_32_bits(reg_list_mc_addr); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_num = cs_data[i].section[j].reg_count; - data = reg_list_mc_addr & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = 0x08000000 | (reg_num * 4); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - for (k = 0; k < reg_num; k++) { - data = cs_data[i].section[j].extent[k]; - dst_ptr[reg_list_blk_index + k] = data; + r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + /* set up the cs buffer */ + dst_ptr = rdev->rlc.cs_ptr; + reg_list_hdr_blk_index = 0; + reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); + data = upper_32_bits(reg_list_mc_addr); + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_num = cs_data[i].section[j].reg_count; + data = reg_list_mc_addr & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + data = 0x08000000 | (reg_num * 4); + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + for (k = 0; k < reg_num; k++) 
{ + data = cs_data[i].section[j].extent[k]; + dst_ptr[reg_list_blk_index + k] = data; + } + reg_list_mc_addr += reg_num * 4; + reg_list_blk_index += reg_num; } - reg_list_mc_addr += reg_num * 4; - reg_list_blk_index += reg_num; } - } - dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; + dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; - radeon_bo_kunmap(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); + radeon_bo_kunmap(rdev->rlc.clear_state_obj); + radeon_bo_unreserve(rdev->rlc.clear_state_obj); + } return 0; } -- cgit v1.2.3 From 1fd11777c2f0e6b6b37432b984bf40e3c6072f23 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 Apr 2013 17:53:50 -0400 Subject: drm/radeon: convert SI,CIK to use sumo_rlc functions and remove duplicate si_rlc functions. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 10 +- drivers/gpu/drm/radeon/clearstate_cayman.h | 2 +- drivers/gpu/drm/radeon/clearstate_evergreen.h | 2 +- drivers/gpu/drm/radeon/evergreen.c | 46 ++++--- drivers/gpu/drm/radeon/ni.c | 6 +- drivers/gpu/drm/radeon/radeon.h | 4 +- drivers/gpu/drm/radeon/si.c | 174 ++------------------------ 7 files changed, 49 insertions(+), 195 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e92a9721ca25..8389917af9a2 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -57,9 +57,9 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); extern bool evergreen_is_display_hung(struct radeon_device *rdev); +extern void sumo_rlc_fini(struct radeon_device *rdev); +extern int sumo_rlc_init(struct radeon_device *rdev); extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); -extern void si_rlc_fini(struct radeon_device *rdev); -extern int si_rlc_init(struct radeon_device *rdev); extern void si_rlc_reset(struct radeon_device *rdev); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); @@ -6019,7 +6019,7 @@ static int cik_startup(struct radeon_device *rdev) cik_gpu_init(rdev); /* allocate rlc buffers */ - r = si_rlc_init(rdev); + r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -6343,7 +6343,7 @@ int cik_init(struct radeon_device *rdev) cik_cp_fini(rdev); cik_sdma_fini(rdev); cik_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); cik_mec_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -6379,7 +6379,7 @@ void cik_fini(struct radeon_device *rdev) cik_cp_fini(rdev); cik_sdma_fini(rdev); cik_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); cik_mec_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h index c00339440c5e..aa908c55a513 100644 --- a/drivers/gpu/drm/radeon/clearstate_cayman.h +++ b/drivers/gpu/drm/radeon/clearstate_cayman.h @@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] = {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, { 0, 0, 0 } }; -struct cs_section_def cayman_cs_data[] = { +static const struct cs_section_def cayman_cs_data[] = { { SECT_CONTEXT_defs, SECT_CONTEXT }, { SECT_CLEAR_defs, SECT_CLEAR }, { SECT_CTRLCONST_defs, SECT_CTRLCONST }, diff --git 
a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h index 4791d856b7fd..63a1ffbb3ced 100644 --- a/drivers/gpu/drm/radeon/clearstate_evergreen.h +++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h @@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] = {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, { 0, 0, 0 } }; -struct cs_section_def evergreen_cs_data[] = { +static const struct cs_section_def evergreen_cs_data[] = { { SECT_CONTEXT_defs, SECT_CONTEXT }, { SECT_CLEAR_defs, SECT_CLEAR }, { SECT_CTRLCONST_defs, SECT_CTRLCONST }, diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 617840829114..6fc876a444d4 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] = #include "clearstate_evergreen.h" -static u32 sumo_rlc_save_restore_register_list[] = +static const u32 sumo_rlc_save_restore_register_list[] = { 0x98fc, 0x9830, @@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] = 0x9150, 0x802c, }; -static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list); static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -3898,12 +3897,12 @@ void sumo_rlc_fini(struct radeon_device *rdev) int sumo_rlc_init(struct radeon_device *rdev) { - u32 *src_ptr; + const u32 *src_ptr; volatile u32 *dst_ptr; u32 dws, data, i, j, k, reg_num; u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; u64 reg_list_mc_addr; - struct cs_section_def *cs_data; + const struct cs_section_def *cs_data; int r; src_ptr = rdev->rlc.reg_list; @@ -3943,22 +3942,28 @@ int sumo_rlc_init(struct radeon_device *rdev) } /* write the sr buffer */ dst_ptr = rdev->rlc.sr_ptr; - /* format: - * dw0: (reg2 << 16) | reg1 - * dw1: reg1 save space - * dw2: reg2 save space - */ - for (i = 0; i < dws; i++) { - data = src_ptr[i] >> 2; - i++; - if (i < dws) - data |= (src_ptr[i] >> 2) << 16; - j = (((i - 1) * 3) / 2); - dst_ptr[j] = data; + if (rdev->family >= CHIP_TAHITI) { + /* SI */ + for (i = 0; i < dws; i++) + dst_ptr[i] = src_ptr[i]; + } else { + /* ON/LN/TN */ + /* format: + * dw0: (reg2 << 16) | reg1 + * dw1: reg1 save space + * dw2: reg2 save space + */ + for (i = 0; i < dws; i++) { + data = src_ptr[i] >> 2; + i++; + if (i < dws) + data |= (src_ptr[i] >> 2) << 16; + j = (((i - 1) * 3) / 2); + dst_ptr[j] = data; + } + j = ((i * 3) / 2); + dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; } - j = ((i * 3) / 2); - dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; - radeon_bo_kunmap(rdev->rlc.save_restore_obj); radeon_bo_unreserve(rdev->rlc.save_restore_obj); } @@ -5152,7 +5157,8 @@ static int evergreen_startup(struct radeon_device *rdev) /* allocate rlc buffers */ if (rdev->flags & RADEON_IS_IGP) { rdev->rlc.reg_list = sumo_rlc_save_restore_register_list; - rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list); rdev->rlc.cs_data = evergreen_cs_data; r = sumo_rlc_init(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index acdd6039ef14..21f2eceff2c6 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -35,7 +35,7 @@ #include "radeon_ucode.h" #include "clearstate_cayman.h" -static u32 tn_rlc_save_restore_register_list[] = +static const u32 tn_rlc_save_restore_register_list[] = { 0x98fc, 0x98f0, @@ 
-160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] = 0x9830, 0x802c, }; -static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list); extern bool evergreen_is_display_hung(struct radeon_device *rdev); extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); @@ -2121,7 +2120,8 @@ static int cayman_startup(struct radeon_device *rdev) /* allocate rlc buffers */ if (rdev->flags & RADEON_IS_IGP) { rdev->rlc.reg_list = tn_rlc_save_restore_register_list; - rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list); rdev->rlc.cs_data = cayman_cs_data; r = sumo_rlc_init(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 034782804846..b9706e83e827 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -854,13 +854,13 @@ struct radeon_rlc { struct radeon_bo *save_restore_obj; uint64_t save_restore_gpu_addr; volatile uint32_t *sr_ptr; - u32 *reg_list; + const u32 *reg_list; u32 reg_list_size; /* for clear state */ struct radeon_bo *clear_state_obj; uint64_t clear_state_gpu_addr; volatile uint32_t *cs_ptr; - struct cs_section_def *cs_data; + const struct cs_section_def *cs_data; }; int radeon_ib_get(struct radeon_device *rdev, int ring, diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 610adfc86bea..8b8963d4a732 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); +extern void sumo_rlc_fini(struct radeon_device *rdev); +extern int sumo_rlc_init(struct radeon_device *rdev); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); @@ -5275,166 +5277,6 @@ static void si_fini_pg(struct radeon_device *rdev) /* * RLC */ -void si_rlc_fini(struct radeon_device *rdev) -{ - int r; - - /* save restore block */ - if (rdev->rlc.save_restore_obj) { - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); - radeon_bo_unpin(rdev->rlc.save_restore_obj); - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - - radeon_bo_unref(&rdev->rlc.save_restore_obj); - rdev->rlc.save_restore_obj = NULL; - } - - /* clear state block */ - if (rdev->rlc.clear_state_obj) { - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); - radeon_bo_unpin(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - - radeon_bo_unref(&rdev->rlc.clear_state_obj); - rdev->rlc.clear_state_obj = NULL; - } -} - -#define RLC_CLEAR_STATE_END_MARKER 0x00000001 - -int si_rlc_init(struct radeon_device *rdev) -{ - volatile u32 *dst_ptr; - u32 dws, data, i, j, k, reg_num; - u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; - u64 reg_list_mc_addr; - const struct cs_section_def *cs_data = si_cs_data; - int r; - - /* save restore block */ - if (rdev->rlc.save_restore_obj == NULL) { - r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, - &rdev->rlc.save_restore_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC sr bo 
failed\n", r); - return r; - } - } - - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.save_restore_gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - - if (rdev->family == CHIP_VERDE) { - r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - /* write the sr buffer */ - dst_ptr = rdev->rlc.sr_ptr; - for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) { - dst_ptr[i] = verde_rlc_save_restore_register_list[i]; - } - radeon_bo_kunmap(rdev->rlc.save_restore_obj); - } - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - - /* clear state block */ - reg_list_num = 0; - dws = 0; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_list_num++; - dws += cs_data[i].section[j].reg_count; - } - } - reg_list_blk_index = (3 * reg_list_num + 2); - dws += reg_list_blk_index; - - if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - } - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.clear_state_gpu_addr); - if (r) { - - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - /* set up the cs buffer */ - dst_ptr = rdev->rlc.cs_ptr; - reg_list_hdr_blk_index = 0; - reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); - data = upper_32_bits(reg_list_mc_addr); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_num = cs_data[i].section[j].reg_count; - data = reg_list_mc_addr & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = 0x08000000 | (reg_num * 4); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - for (k = 0; k < reg_num; k++) { - data = cs_data[i].section[j].extent[k]; - dst_ptr[reg_list_blk_index + k] = data; - } - reg_list_mc_addr += reg_num * 4; - reg_list_blk_index += reg_num; - } - } - dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; - - radeon_bo_kunmap(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - - return 0; -} - void si_rlc_reset(struct radeon_device *rdev) { u32 tmp = RREG32(GRBM_SOFT_RESET); @@ -6449,7 +6291,13 @@ static int si_startup(struct radeon_device *rdev) si_gpu_init(rdev); /* allocate rlc buffers */ - r = si_rlc_init(rdev); + if (rdev->family == CHIP_VERDE) { + rdev->rlc.reg_list = 
verde_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list); + } + rdev->rlc.cs_data = si_cs_data; + r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -6735,7 +6583,7 @@ int si_init(struct radeon_device *rdev) si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); @@ -6761,7 +6609,7 @@ void si_fini(struct radeon_device *rdev) si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); si_fini_cg(rdev); si_fini_pg(rdev); radeon_wb_fini(rdev); -- cgit v1.2.3 From 22c775ce80ed921fe9490f3cc2ca66dcda44f572 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2013 09:41:05 -0400 Subject: drm/radeon: implement clock and power gating for CIK (v3) Only the APUs support power gating. v2: disable cgcg for now v3: workaround hw issue in mgcg Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 1475 +++++++++++++++++++++++++++++++- drivers/gpu/drm/radeon/cikd.h | 95 +- drivers/gpu/drm/radeon/clearstate_ci.h | 944 ++++++++++++++++++++ drivers/gpu/drm/radeon/evergreen.c | 57 +- drivers/gpu/drm/radeon/radeon.h | 14 + drivers/gpu/drm/radeon/radeon_asic.c | 2 + drivers/gpu/drm/radeon/si.c | 2 +- 7 files changed, 2565 insertions(+), 24 deletions(-) create mode 100644 drivers/gpu/drm/radeon/clearstate_ci.h diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 8389917af9a2..a36e98c9a875 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -31,6 +31,7 @@ #include "atom.h" #include "cik_blit_shaders.h" #include "radeon_ucode.h" +#include "clearstate_ci.h" MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); @@ -61,9 +62,12 @@ extern void sumo_rlc_fini(struct radeon_device *rdev); extern int sumo_rlc_init(struct radeon_device *rdev); extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void si_rlc_reset(struct radeon_device *rdev); +extern void si_init_uvd_internal_cg(struct radeon_device *rdev); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); +static void cik_init_pg(struct radeon_device *rdev); +static void cik_init_cg(struct radeon_device *rdev); /* * Indirect registers accessor @@ -86,6 +90,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) (void)RREG32(PCIE_DATA); } +static const u32 spectre_rlc_save_restore_register_list[] = +{ + (0x0e00 << 16) | (0xc12c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc140 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc150 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc15c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc168 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc170 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc178 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc204 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8228 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x829c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x869c >> 2), + 0x00000000, + (0x0600 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x98f8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9900 >> 
2), + 0x00000000, + (0x0e00 << 16) | (0xc260 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c000 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c00c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0xae00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8900 >> 2), + 0x00000000, + 0x3, + (0x0e00 << 16) | (0xc130 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc134 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc208 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc264 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc268 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc26c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc270 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc274 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc278 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc27c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc280 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc284 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc288 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc28c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc290 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc294 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc298 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc29c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x301d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30238 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30250 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30254 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30258 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3025c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc900 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc904 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc908 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x6e00 << 16) 
| (0xc90c >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0xae00 << 16) | (0xc90c >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc910 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc99c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9834 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0600 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bf0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30a04 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a10 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a14 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a2c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc704 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc708 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc768 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc770 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc774 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc778 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc77c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc780 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc784 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc788 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc78c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc798 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc79c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a0 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a4 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a8 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7ac >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7b0 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c010 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92a8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92cc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c04 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c38 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c3c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xae00 >> 2), + 0x00000000, + 
(0x0e00 << 16) | (0x9604 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac58 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac68 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac6c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac70 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac74 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac78 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac80 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac84 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac88 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac8c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x970c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9714 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9718 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x971c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x4e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x5e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x6e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x7e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x8e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x9e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0xae00 << 16) | (0x31068 >> 2), + 0x00000000, + (0xbe00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88bc >> 2), + 0x00000000, + (0x0400 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8980 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30938 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3093c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30940 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30904 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c210 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c214 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c218 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8904 >> 2), + 0x00000000, + 0x5, + (0x0e00 << 16) | (0x8c28 >> 2), + (0x0e00 << 16) | (0x8c2c >> 2), + (0x0e00 << 16) | (0x8c30 >> 2), + (0x0e00 << 16) | (0x8c34 >> 2), + (0x0e00 << 16) | (0x9600 >> 2), +}; + +static const u32 kalindi_rlc_save_restore_register_list[] = +{ + (0x0e00 << 16) | (0xc12c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc140 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc150 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc15c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc168 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc170 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc204 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8228 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x829c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x869c >> 2), + 0x00000000, + (0x0600 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x98f8 
>> 2), + 0x00000000, + (0x0e00 << 16) | (0x9900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc260 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c000 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c00c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8900 >> 2), + 0x00000000, + 0x3, + (0x0e00 << 16) | (0xc130 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc134 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc208 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc264 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc268 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc26c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc270 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc274 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc28c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc290 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc294 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc298 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0x301d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30238 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30250 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30254 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30258 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3025c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc99c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9834 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0600 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bf0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30a04 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a10 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a14 >> 2), + 0x00000000, + 
(0x0600 << 16) | (0x30a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a2c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc704 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc708 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc768 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc770 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc774 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc798 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc79c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c010 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c04 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c38 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c3c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xae00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9604 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac58 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac68 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac6c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac70 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac74 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac78 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac80 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac84 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac88 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac8c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x970c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9714 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9718 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x971c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x4e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x5e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x6e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x7e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88bc >> 2), + 0x00000000, + (0x0400 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8980 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30938 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3093c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30940 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30904 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3e1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c210 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c214 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c218 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8904 >> 2), + 0x00000000, + 0x5, + (0x0e00 << 16) | (0x8c28 >> 2), + (0x0e00 << 16) | (0x8c2c >> 2), + (0x0e00 << 16) | (0x8c30 >> 2), + (0x0e00 << 16) | (0x8c34 >> 2), + (0x0e00 << 16) | (0x9600 >> 2), +}; + static const u32 bonaire_golden_spm_registers[] = { 0x30800, 0xe0ffffff, 0xe0000000 @@ 
-4778,6 +5554,39 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev) } } +static void cik_update_rlc(struct radeon_device *rdev, u32 rlc) +{ + u32 tmp; + + tmp = RREG32(RLC_CNTL); + if (tmp != rlc) + WREG32(RLC_CNTL, rlc); +} + +static u32 cik_halt_rlc(struct radeon_device *rdev) +{ + u32 data, orig; + + orig = data = RREG32(RLC_CNTL); + + if (data & RLC_ENABLE) { + u32 i; + + data &= ~RLC_ENABLE; + WREG32(RLC_CNTL, data); + + for (i = 0; i < rdev->usec_timeout; i++) { + if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0) + break; + udelay(1); + } + + cik_wait_for_rlc_serdes(rdev); + } + + return orig; +} + /** * cik_rlc_stop - stop the RLC ME * @@ -4787,20 +5596,10 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev) */ static void cik_rlc_stop(struct radeon_device *rdev) { - u32 tmp; + WREG32(RLC_CNTL, 0); cik_enable_gui_idle_interrupt(rdev, false); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - - tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; - WREG32(RLC_CGCG_CGLS_CTRL, tmp); - - WREG32(RLC_CNTL, 0); - cik_wait_for_rlc_serdes(rdev); } @@ -4831,8 +5630,7 @@ static void cik_rlc_start(struct radeon_device *rdev) */ static int cik_rlc_resume(struct radeon_device *rdev) { - u32 i, size; - u32 clear_state_info[3]; + u32 i, size, tmp; const __be32 *fw_data; if (!rdev->rlc_fw) @@ -4853,8 +5651,16 @@ static int cik_rlc_resume(struct radeon_device *rdev) cik_rlc_stop(rdev); + /* disable CG */ + tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; + WREG32(RLC_CGCG_CGLS_CTRL, tmp); + si_rlc_reset(rdev); + cik_init_pg(rdev); + + cik_init_cg(rdev); + WREG32(RLC_LB_CNTR_INIT, 0); WREG32(RLC_LB_CNTR_MAX, 0x00008000); @@ -4875,20 +5681,634 @@ static int cik_rlc_resume(struct radeon_device *rdev) /* XXX - find out what chips support lbpw */ cik_enable_lbpw(rdev, false); - /* XXX */ - clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr); - clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr; - clear_state_info[2] = 0;//cik_default_size; - WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d); - for (i = 0; i < 3; i++) - WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]); - WREG32(RLC_DRIVER_DMA_STATUS, 0); + if (rdev->family == CHIP_BONAIRE) + WREG32(RLC_DRIVER_DMA_STATUS, 0); cik_rlc_start(rdev); return 0; } +static void cik_enable_cgcg(struct radeon_device *rdev, bool enable) +{ + u32 data, orig, tmp, tmp2; + + orig = data = RREG32(RLC_CGCG_CGLS_CTRL); + + cik_enable_gui_idle_interrupt(rdev, enable); + + if (enable) { + tmp = cik_halt_rlc(rdev); + + cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); + WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE; + WREG32(RLC_SERDES_WR_CTRL, tmp2); + + cik_update_rlc(rdev, tmp); + + data |= CGCG_EN | CGLS_EN; + } else { + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + data &= ~(CGCG_EN | CGLS_EN); + } + + if (orig != data) + WREG32(RLC_CGCG_CGLS_CTRL, data); + +} + +static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) +{ + u32 data, orig, tmp = 0; + + if (enable) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data &= 0xfffffffd; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + tmp = cik_halt_rlc(rdev); + + cik_select_se_sh(rdev, 0xffffffff, 
0xffffffff); + WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + data = BPM_ADDR_MASK | MGCG_OVERRIDE_0; + WREG32(RLC_SERDES_WR_CTRL, data); + + cik_update_rlc(rdev, tmp); + + orig = data = RREG32(CGTS_SM_CTRL_REG); + data &= ~SM_MODE_MASK; + data |= SM_MODE(0x2); + data |= SM_MODE_ENABLE; + data &= ~CGTS_OVERRIDE; + data &= ~CGTS_LS_OVERRIDE; + data &= ~ON_MONITOR_ADD_MASK; + data |= ON_MONITOR_ADD_EN; + data |= ON_MONITOR_ADD(0x96); + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + } else { + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000002; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + data = RREG32(RLC_MEM_SLP_CNTL); + if (data & RLC_MEM_LS_EN) { + data &= ~RLC_MEM_LS_EN; + WREG32(RLC_MEM_SLP_CNTL, data); + } + + data = RREG32(CP_MEM_SLP_CNTL); + if (data & CP_MEM_LS_EN) { + data &= ~CP_MEM_LS_EN; + WREG32(CP_MEM_SLP_CNTL, data); + } + + orig = data = RREG32(CGTS_SM_CTRL_REG); + data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE; + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + + tmp = cik_halt_rlc(rdev); + + cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); + WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + data = BPM_ADDR_MASK | MGCG_OVERRIDE_1; + WREG32(RLC_SERDES_WR_CTRL, data); + + cik_update_rlc(rdev, tmp); + } +} + +static const u32 mc_cg_registers[] = +{ + MC_HUB_MISC_HUB_CG, + MC_HUB_MISC_SIP_CG, + MC_HUB_MISC_VM_CG, + MC_XPB_CLK_GAT, + ATC_MISC_CG, + MC_CITF_MISC_WR_CG, + MC_CITF_MISC_RD_CG, + MC_CITF_MISC_VM_CG, + VM_L2_CG, +}; + +static void cik_enable_mc_ls(struct radeon_device *rdev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable) + data |= MC_LS_ENABLE; + else + data &= ~MC_LS_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void cik_enable_mc_mgcg(struct radeon_device *rdev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable) + data |= MC_CG_ENABLE; + else + data &= ~MC_CG_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void cik_enable_sdma_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + if (enable) { + WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); + WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); + } else { + orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET); + data |= 0xff000000; + if (data != orig) + WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET); + data |= 0xff000000; + if (data != orig) + WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data); + } +} + +static void cik_enable_sdma_mgls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + if (enable) { + orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); + data |= 0x100; + if (orig != data) + WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET); + data |= 0x100; + if (orig != data) + WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data); + } else { + orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); + data &= ~0x100; + if (orig != data) + WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(SDMA0_POWER_CNTL + 
SDMA1_REGISTER_OFFSET); + data &= ~0x100; + if (orig != data) + WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data); + } +} + +static void cik_enable_uvd_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + if (enable) { + data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); + data = 0xfff; + WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data); + + orig = data = RREG32(UVD_CGC_CTRL); + data |= DCM; + if (orig != data) + WREG32(UVD_CGC_CTRL, data); + } else { + data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); + data &= ~0xfff; + WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data); + + orig = data = RREG32(UVD_CGC_CTRL); + data &= ~DCM; + if (orig != data) + WREG32(UVD_CGC_CTRL, data); + } +} + +static void cik_enable_hdp_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_HOST_PATH_CNTL); + + if (enable) + data &= ~CLOCK_GATING_DIS; + else + data |= CLOCK_GATING_DIS; + + if (orig != data) + WREG32(HDP_HOST_PATH_CNTL, data); +} + +static void cik_enable_hdp_ls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_MEM_POWER_LS); + + if (enable) + data |= HDP_LS_ENABLE; + else + data &= ~HDP_LS_ENABLE; + + if (orig != data) + WREG32(HDP_MEM_POWER_LS, data); +} + +void cik_update_cg(struct radeon_device *rdev, + u32 block, bool enable) +{ + if (block & RADEON_CG_BLOCK_GFX) { + /* order matters! */ + if (enable) { + cik_enable_mgcg(rdev, true); + cik_enable_cgcg(rdev, true); + } else { + cik_enable_cgcg(rdev, false); + cik_enable_mgcg(rdev, false); + } + } + + if (block & RADEON_CG_BLOCK_MC) { + if (!(rdev->flags & RADEON_IS_IGP)) { + cik_enable_mc_mgcg(rdev, enable); + cik_enable_mc_ls(rdev, enable); + } + } + + if (block & RADEON_CG_BLOCK_SDMA) { + cik_enable_sdma_mgcg(rdev, enable); + cik_enable_sdma_mgls(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_UVD) { + if (rdev->has_uvd) + cik_enable_uvd_mgcg(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_HDP) { + cik_enable_hdp_mgcg(rdev, enable); + cik_enable_hdp_ls(rdev, enable); + } +} + +static void cik_init_cg(struct radeon_device *rdev) +{ + + cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false); /* XXX true */ + + if (rdev->has_uvd) + si_init_uvd_internal_cg(rdev); + + cik_update_cg(rdev, (RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), true); +} + +static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE; + else + data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE; + else + data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data &= ~DISABLE_CP_PG; + else + data |= DISABLE_CP_PG; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data &= ~DISABLE_GDS_PG; + else + data |= DISABLE_GDS_PG; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +#define CP_ME_TABLE_SIZE 96 +#define CP_ME_TABLE_OFFSET 2048 +#define 
CP_MEC_TABLE_OFFSET 4096 + +void cik_init_cp_pg_table(struct radeon_device *rdev) +{ + const __be32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset; + + if (rdev->family == CHIP_KAVERI) + max_me = 5; + + if (rdev->rlc.cp_table_ptr == NULL) + return; + + /* write the cp table buffer */ + dst_ptr = rdev->rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + fw_data = (const __be32 *)rdev->ce_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else if (me == 1) { + fw_data = (const __be32 *)rdev->pfp_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else if (me == 2) { + fw_data = (const __be32 *)rdev->me_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else { + fw_data = (const __be32 *)rdev->mec_fw->data; + table_offset = CP_MEC_TABLE_OFFSET; + } + + for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { + dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]); + } + bo_offset += CP_ME_TABLE_SIZE; + } +} + +static void cik_enable_gfx_cgpg(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig; + + if (enable) { + orig = data = RREG32(RLC_PG_CNTL); + data |= GFX_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); + + orig = data = RREG32(RLC_AUTO_PG_CTRL); + data |= AUTO_PG_EN; + if (orig != data) + WREG32(RLC_AUTO_PG_CTRL, data); + } else { + orig = data = RREG32(RLC_PG_CNTL); + data &= ~GFX_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); + + orig = data = RREG32(RLC_AUTO_PG_CTRL); + data &= ~AUTO_PG_EN; + if (orig != data) + WREG32(RLC_AUTO_PG_CTRL, data); + + data = RREG32(DB_RENDER_CONTROL); + } +} + +static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) +{ + u32 mask = 0, tmp, tmp1; + int i; + + cik_select_se_sh(rdev, se, sh); + tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); + tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); + cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); + + tmp &= 0xffff0000; + + tmp |= tmp1; + tmp >>= 16; + + for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) { + mask <<= 1; + mask |= 1; + } + + return (~tmp) & mask; +} + +static void cik_init_ao_cu_mask(struct radeon_device *rdev) +{ + u32 i, j, k, active_cu_number = 0; + u32 mask, counter, cu_bitmap; + u32 tmp = 0; + + for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { + for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { + mask = 1; + cu_bitmap = 0; + counter = 0; + for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) { + if (cik_get_cu_active_bitmap(rdev, i, j) & mask) { + if (counter < 2) + cu_bitmap |= mask; + counter ++; + } + mask <<= 1; + } + + active_cu_number += counter; + tmp |= (cu_bitmap << (i * 16 + j * 8)); + } + } + + WREG32(RLC_PG_AO_CU_MASK, tmp); + + tmp = RREG32(RLC_MAX_PG_CU); + tmp &= ~MAX_PU_CU_MASK; + tmp |= MAX_PU_CU(active_cu_number); + WREG32(RLC_MAX_PG_CU, tmp); +} + +static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data |= STATIC_PER_CU_PG_ENABLE; + else + data &= ~STATIC_PER_CU_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable) + data |= DYN_PER_CU_PG_ENABLE; + else + data &= ~DYN_PER_CU_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90 +#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D + +static void 
cik_init_gfx_cgpg(struct radeon_device *rdev) +{ + u32 data, orig; + u32 i; + + if (rdev->rlc.cs_data) { + WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); + WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr)); + WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_gpu_addr); + WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size); + } else { + WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); + for (i = 0; i < 3; i++) + WREG32(RLC_GPM_SCRATCH_DATA, 0); + } + if (rdev->rlc.reg_list) { + WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET); + for (i = 0; i < rdev->rlc.reg_list_size; i++) + WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]); + } + + orig = data = RREG32(RLC_PG_CNTL); + data |= GFX_PG_SRC; + if (orig != data) + WREG32(RLC_PG_CNTL, data); + + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8); + + data = RREG32(CP_RB_WPTR_POLL_CNTL); + data &= ~IDLE_POLL_COUNT_MASK; + data |= IDLE_POLL_COUNT(0x60); + WREG32(CP_RB_WPTR_POLL_CNTL, data); + + data = 0x10101010; + WREG32(RLC_PG_DELAY, data); + + data = RREG32(RLC_PG_DELAY_2); + data &= ~0xff; + data |= 0x3; + WREG32(RLC_PG_DELAY_2, data); + + data = RREG32(RLC_AUTO_PG_CTRL); + data &= ~GRBM_REG_SGIT_MASK; + data |= GRBM_REG_SGIT(0x700); + WREG32(RLC_AUTO_PG_CTRL, data); + +} + +static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable) +{ + bool has_pg = false; + bool has_dyn_mgpg = false; + bool has_static_mgpg = false; + + /* only APUs have PG */ + if (rdev->flags & RADEON_IS_IGP) { + has_pg = true; + has_static_mgpg = true; + if (rdev->family == CHIP_KAVERI) + has_dyn_mgpg = true; + } + + if (has_pg) { + cik_enable_gfx_cgpg(rdev, enable); + if (enable) { + cik_enable_gfx_static_mgpg(rdev, has_static_mgpg); + cik_enable_gfx_dynamic_mgpg(rdev, has_dyn_mgpg); + } else { + cik_enable_gfx_static_mgpg(rdev, false); + cik_enable_gfx_dynamic_mgpg(rdev, false); + } + } + +} + +void cik_init_pg(struct radeon_device *rdev) +{ + bool has_pg = false; + + /* only APUs have PG */ + if (rdev->flags & RADEON_IS_IGP) { + /* XXX disable this for now */ + /* has_pg = true; */ + } + + if (has_pg) { + cik_enable_sck_slowdown_on_pu(rdev, true); + cik_enable_sck_slowdown_on_pd(rdev, true); + cik_init_gfx_cgpg(rdev); + cik_enable_cp_pg(rdev, true); + cik_enable_gds_pg(rdev, true); + cik_init_ao_cu_mask(rdev); + cik_update_gfx_pg(rdev, true); + } +} + /* * Interrupts * Starting with r6xx, interrupts are handled via a ring buffer. 
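All of the cik_enable_* clock- and power-gating helpers added above follow the same read-modify-write idiom: read the register, flip only the relevant enable bits, and write the value back only if it actually changed, so redundant MMIO writes are avoided. A minimal standalone sketch of that idiom follows; fake_reg, regread(), regwrite() and FEATURE_ENABLE are hypothetical stand-ins for the driver's MMIO register and its RREG32()/WREG32() accessors, not part of the patch itself.

#include <stdio.h>

static unsigned int fake_reg;			/* stand-in for a real MMIO register */

static unsigned int regread(void)		{ return fake_reg; }
static void regwrite(unsigned int v)		{ fake_reg = v; }

#define FEATURE_ENABLE	(1u << 0)		/* hypothetical enable bit */

/* Toggle a feature bit, writing the register back only when it changed. */
static void set_feature(int enable)
{
	unsigned int orig, data;

	orig = data = regread();
	if (enable)
		data |= FEATURE_ENABLE;
	else
		data &= ~FEATURE_ENABLE;
	if (orig != data)
		regwrite(data);
}

int main(void)
{
	set_feature(1);
	printf("reg = 0x%x\n", fake_reg);	/* prints 0x1 */
	set_feature(0);
	printf("reg = 0x%x\n", fake_reg);	/* prints 0x0 */
	return 0;
}

The "only write on change" check matters here because several of these registers are latched by the RLC/SMU; skipping no-op writes keeps the gating sequences cheap when cik_update_cg() is called with the state already in effect.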
@@ -6019,6 +7439,19 @@ static int cik_startup(struct radeon_device *rdev) cik_gpu_init(rdev); /* allocate rlc buffers */ + if (rdev->flags & RADEON_IS_IGP) { + if (rdev->family == CHIP_KAVERI) { + rdev->rlc.reg_list = spectre_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list); + } else { + rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list); + } + } + rdev->rlc.cs_data = ci_cs_data; + rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 4742f3db4aa6..63955abb1e11 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -299,6 +299,10 @@ #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C #define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580 +#define VM_L2_CG 0x15c0 +#define MC_CG_ENABLE (1 << 18) +#define MC_LS_ENABLE (1 << 19) + #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x0000f000 @@ -328,6 +332,17 @@ #define MC_SHARED_BLACKOUT_CNTL 0x20ac +#define MC_HUB_MISC_HUB_CG 0x20b8 +#define MC_HUB_MISC_VM_CG 0x20bc + +#define MC_HUB_MISC_SIP_CG 0x20c0 + +#define MC_XPB_CLK_GAT 0x2478 + +#define MC_CITF_MISC_RD_CG 0x2648 +#define MC_CITF_MISC_WR_CG 0x264c +#define MC_CITF_MISC_VM_CG 0x2650 + #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 @@ -357,6 +372,7 @@ #define MC_SEQ_IO_DEBUG_DATA 0x2a48 #define HDP_HOST_PATH_CNTL 0x2C00 +#define CLOCK_GATING_DIS (1 << 23) #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C @@ -364,6 +380,10 @@ #define HDP_ADDR_CONFIG 0x2F48 #define HDP_MISC_CNTL 0x2F4C #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) +#define HDP_MEM_POWER_LS 0x2F50 +#define HDP_LS_ENABLE (1 << 0) + +#define ATC_MISC_CG 0x3350 #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) @@ -631,6 +651,9 @@ #define CP_RB0_RPTR 0x8700 #define CP_RB_WPTR_DELAY 0x8704 +#define CP_RB_WPTR_POLL_CNTL 0x8708 +#define IDLE_POLL_COUNT(x) ((x) << 16) +#define IDLE_POLL_COUNT_MASK (0xffff << 16) #define CP_MEQ_THRESHOLDS 0x8764 #define MEQ1_START(x) ((x) << 0) @@ -857,6 +880,9 @@ # define CP_RINGID1_INT_STAT (1 << 30) # define CP_RINGID0_INT_STAT (1 << 31) +#define CP_MEM_SLP_CNTL 0xC1E4 +# define CP_MEM_LS_EN (1 << 0) + #define CP_CPF_DEBUG 0xC200 #define CP_PQ_WPTR_POLL_CNTL 0xC20C @@ -902,6 +928,9 @@ #define RLC_MC_CNTL 0xC30C +#define RLC_MEM_SLP_CNTL 0xC318 +# define RLC_MEM_LS_EN (1 << 0) + #define RLC_LB_CNTR_MAX 0xC348 #define RLC_LB_CNTL 0xC364 @@ -910,7 +939,9 @@ #define RLC_LB_CNTR_INIT 0xC36C #define RLC_SAVE_AND_RESTORE_BASE 0xC374 -#define RLC_DRIVER_DMA_STATUS 0xC378 +#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */ +#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */ +#define RLC_PG_DELAY_2 0xC37C #define RLC_GPM_UCODE_ADDR 0xC388 #define RLC_GPM_UCODE_DATA 0xC38C @@ -919,12 +950,50 @@ #define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398 #define RLC_UCODE_CNTL 0xC39C +#define RLC_GPM_STAT 0xC400 +# define RLC_GPM_BUSY (1 << 0) + +#define RLC_PG_CNTL 0xC40C +# define GFX_PG_ENABLE (1 << 0) +# define GFX_PG_SRC (1 << 1) +# define DYN_PER_CU_PG_ENABLE (1 << 2) +# define STATIC_PER_CU_PG_ENABLE (1 << 3) +# define DISABLE_GDS_PG (1 << 13) +# define DISABLE_CP_PG (1 << 15) +# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17) +# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 
18) + +#define RLC_CGTT_MGCG_OVERRIDE 0xC420 #define RLC_CGCG_CGLS_CTRL 0xC424 +# define CGCG_EN (1 << 0) +# define CGLS_EN (1 << 1) + +#define RLC_PG_DELAY 0xC434 #define RLC_LB_INIT_CU_MASK 0xC43C #define RLC_LB_PARAMS 0xC444 +#define RLC_PG_AO_CU_MASK 0xC44C + +#define RLC_MAX_PG_CU 0xC450 +# define MAX_PU_CU(x) ((x) << 0) +# define MAX_PU_CU_MASK (0xff << 0) +#define RLC_AUTO_PG_CTRL 0xC454 +# define AUTO_PG_EN (1 << 0) +# define GRBM_REG_SGIT(x) ((x) << 3) +# define GRBM_REG_SGIT_MASK (0xffff << 3) + +#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474 +#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478 +#define RLC_SERDES_WR_CTRL 0xC47C +#define BPM_ADDR(x) ((x) << 0) +#define BPM_ADDR_MASK (0xff << 0) +#define CGLS_ENABLE (1 << 16) +#define CGCG_OVERRIDE_0 (1 << 20) +#define MGCG_OVERRIDE_0 (1 << 22) +#define MGCG_OVERRIDE_1 (1 << 23) + #define RLC_SERDES_CU_MASTER_BUSY 0xC484 #define RLC_SERDES_NONCU_MASTER_BUSY 0xC488 # define SE_MASTER_BUSY_MASK 0x0000ffff @@ -979,6 +1048,8 @@ #define MQD_VMID(x) ((x) << 0) #define MQD_VMID_MASK (0xf << 0) +#define DB_RENDER_CONTROL 0x28000 + #define PA_SC_RASTER_CONFIG 0x28350 # define RASTER_CONFIG_RB_MAP_0 0 # define RASTER_CONFIG_RB_MAP_1 1 @@ -1072,6 +1143,16 @@ #define CP_PERFMON_CNTL 0x36020 +#define CGTS_SM_CTRL_REG 0x3c000 +#define SM_MODE(x) ((x) << 17) +#define SM_MODE_MASK (0x7 << 17) +#define SM_MODE_ENABLE (1 << 20) +#define CGTS_OVERRIDE (1 << 21) +#define CGTS_LS_OVERRIDE (1 << 22) +#define ON_MONITOR_ADD_EN (1 << 23) +#define ON_MONITOR_ADD(x) ((x) << 24) +#define ON_MONITOR_ADD_MASK (0xff << 24) + #define CGTS_TCC_DISABLE 0x3c00c #define CGTS_USER_TCC_DISABLE 0x3c010 #define TCC_DISABLE_MASK 0xFFFF0000 @@ -1304,6 +1385,8 @@ #define SDMA0_UCODE_ADDR 0xD000 #define SDMA0_UCODE_DATA 0xD004 +#define SDMA0_POWER_CNTL 0xD008 +#define SDMA0_CLK_CTRL 0xD00C #define SDMA0_CNTL 0xD010 # define TRAP_ENABLE (1 << 0) @@ -1428,6 +1511,13 @@ #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 +#define UVD_CGC_CTRL 0xF4B0 +# define DCM (1 << 0) +# define CG_DT(x) ((x) << 2) +# define CG_DT_MASK (0xf << 2) +# define CLK_OD(x) ((x) << 6) +# define CLK_OD_MASK (0x1f << 6) + /* UVD clocks */ #define CG_DCLK_CNTL 0xC050009C @@ -1438,4 +1528,7 @@ #define CG_VCLK_CNTL 0xC05000A4 #define CG_VCLK_STATUS 0xC05000A8 +/* UVD CTX indirect */ +#define UVD_CGC_MEM_CTRL 0xC0 + #endif diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h new file mode 100644 index 000000000000..c3982f9475fb --- /dev/null +++ b/drivers/gpu/drm/radeon/clearstate_ci.h @@ -0,0 +1,944 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +static const unsigned int ci_SECT_CONTEXT_def_1[] = +{ + 0x00000000, // DB_RENDER_CONTROL + 0x00000000, // DB_COUNT_CONTROL + 0x00000000, // DB_DEPTH_VIEW + 0x00000000, // DB_RENDER_OVERRIDE + 0x00000000, // DB_RENDER_OVERRIDE2 + 0x00000000, // DB_HTILE_DATA_BASE + 0, // HOLE + 0, // HOLE + 0x00000000, // DB_DEPTH_BOUNDS_MIN + 0x00000000, // DB_DEPTH_BOUNDS_MAX + 0x00000000, // DB_STENCIL_CLEAR + 0x00000000, // DB_DEPTH_CLEAR + 0x00000000, // PA_SC_SCREEN_SCISSOR_TL + 0x40004000, // PA_SC_SCREEN_SCISSOR_BR + 0, // HOLE + 0x00000000, // DB_DEPTH_INFO + 0x00000000, // DB_Z_INFO + 0x00000000, // DB_STENCIL_INFO + 0x00000000, // DB_Z_READ_BASE + 0x00000000, // DB_STENCIL_READ_BASE + 0x00000000, // DB_Z_WRITE_BASE + 0x00000000, // DB_STENCIL_WRITE_BASE + 0x00000000, // DB_DEPTH_SIZE + 0x00000000, // DB_DEPTH_SLICE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // TA_BC_BASE_ADDR + 0x00000000, // TA_BC_BASE_ADDR_HI + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // COHER_DEST_BASE_HI_0 + 0x00000000, // COHER_DEST_BASE_HI_1 + 0x00000000, // COHER_DEST_BASE_HI_2 + 0x00000000, // COHER_DEST_BASE_HI_3 + 0x00000000, // COHER_DEST_BASE_2 + 0x00000000, // COHER_DEST_BASE_3 + 0x00000000, // PA_SC_WINDOW_OFFSET + 0x80000000, // PA_SC_WINDOW_SCISSOR_TL + 0x40004000, // PA_SC_WINDOW_SCISSOR_BR + 0x0000ffff, // PA_SC_CLIPRECT_RULE + 0x00000000, // PA_SC_CLIPRECT_0_TL + 0x40004000, // PA_SC_CLIPRECT_0_BR + 0x00000000, // PA_SC_CLIPRECT_1_TL + 0x40004000, // PA_SC_CLIPRECT_1_BR + 0x00000000, // PA_SC_CLIPRECT_2_TL + 0x40004000, // PA_SC_CLIPRECT_2_BR + 0x00000000, // PA_SC_CLIPRECT_3_TL + 0x40004000, // PA_SC_CLIPRECT_3_BR + 0xaa99aaaa, // PA_SC_EDGERULE + 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET + 0xffffffff, // CB_TARGET_MASK + 0xffffffff, // CB_SHADER_MASK + 0x80000000, // PA_SC_GENERIC_SCISSOR_TL + 0x40004000, // PA_SC_GENERIC_SCISSOR_BR + 0x00000000, // COHER_DEST_BASE_0 + 0x00000000, // COHER_DEST_BASE_1 + 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL + 0x40004000, // 
PA_SC_VPORT_SCISSOR_1_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR + 0x00000000, // PA_SC_VPORT_ZMIN_0 + 0x3f800000, // PA_SC_VPORT_ZMAX_0 + 0x00000000, // PA_SC_VPORT_ZMIN_1 + 0x3f800000, // PA_SC_VPORT_ZMAX_1 + 0x00000000, // PA_SC_VPORT_ZMIN_2 + 0x3f800000, // PA_SC_VPORT_ZMAX_2 + 0x00000000, // PA_SC_VPORT_ZMIN_3 + 0x3f800000, // PA_SC_VPORT_ZMAX_3 + 0x00000000, // PA_SC_VPORT_ZMIN_4 + 0x3f800000, // PA_SC_VPORT_ZMAX_4 + 0x00000000, // PA_SC_VPORT_ZMIN_5 + 0x3f800000, // PA_SC_VPORT_ZMAX_5 + 0x00000000, // PA_SC_VPORT_ZMIN_6 + 0x3f800000, // PA_SC_VPORT_ZMAX_6 + 0x00000000, // PA_SC_VPORT_ZMIN_7 + 0x3f800000, // PA_SC_VPORT_ZMAX_7 + 0x00000000, // PA_SC_VPORT_ZMIN_8 + 0x3f800000, // PA_SC_VPORT_ZMAX_8 + 0x00000000, // PA_SC_VPORT_ZMIN_9 + 0x3f800000, // PA_SC_VPORT_ZMAX_9 + 0x00000000, // PA_SC_VPORT_ZMIN_10 + 0x3f800000, // PA_SC_VPORT_ZMAX_10 + 0x00000000, // PA_SC_VPORT_ZMIN_11 + 0x3f800000, // PA_SC_VPORT_ZMAX_11 + 0x00000000, // PA_SC_VPORT_ZMIN_12 + 0x3f800000, // PA_SC_VPORT_ZMAX_12 + 0x00000000, // PA_SC_VPORT_ZMIN_13 + 0x3f800000, // PA_SC_VPORT_ZMAX_13 + 0x00000000, // PA_SC_VPORT_ZMIN_14 + 0x3f800000, // PA_SC_VPORT_ZMAX_14 + 0x00000000, // PA_SC_VPORT_ZMIN_15 + 0x3f800000, // PA_SC_VPORT_ZMAX_15 +}; +static const unsigned int ci_SECT_CONTEXT_def_2[] = +{ + 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL + 0, // HOLE + 0x00000000, // CP_PERFMON_CNTX_CNTL + 0x00000000, // CP_RINGID + 0x00000000, // CP_VMID + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0xffffffff, // VGT_MAX_VTX_INDX + 0x00000000, // VGT_MIN_VTX_INDX + 0x00000000, // VGT_INDX_OFFSET + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX + 0, // HOLE + 0x00000000, // CB_BLEND_RED + 0x00000000, // CB_BLEND_GREEN + 0x00000000, // CB_BLEND_BLUE + 0x00000000, // CB_BLEND_ALPHA + 0, // HOLE + 0, // HOLE + 0x00000000, // DB_STENCIL_CONTROL + 0x00000000, // DB_STENCILREFMASK + 0x00000000, // DB_STENCILREFMASK_BF + 0, // HOLE + 0x00000000, // PA_CL_VPORT_XSCALE + 0x00000000, // 
PA_CL_VPORT_XOFFSET + 0x00000000, // PA_CL_VPORT_YSCALE + 0x00000000, // PA_CL_VPORT_YOFFSET + 0x00000000, // PA_CL_VPORT_ZSCALE + 0x00000000, // PA_CL_VPORT_ZOFFSET + 0x00000000, // PA_CL_VPORT_XSCALE_1 + 0x00000000, // PA_CL_VPORT_XOFFSET_1 + 0x00000000, // PA_CL_VPORT_YSCALE_1 + 0x00000000, // PA_CL_VPORT_YOFFSET_1 + 0x00000000, // PA_CL_VPORT_ZSCALE_1 + 0x00000000, // PA_CL_VPORT_ZOFFSET_1 + 0x00000000, // PA_CL_VPORT_XSCALE_2 + 0x00000000, // PA_CL_VPORT_XOFFSET_2 + 0x00000000, // PA_CL_VPORT_YSCALE_2 + 0x00000000, // PA_CL_VPORT_YOFFSET_2 + 0x00000000, // PA_CL_VPORT_ZSCALE_2 + 0x00000000, // PA_CL_VPORT_ZOFFSET_2 + 0x00000000, // PA_CL_VPORT_XSCALE_3 + 0x00000000, // PA_CL_VPORT_XOFFSET_3 + 0x00000000, // PA_CL_VPORT_YSCALE_3 + 0x00000000, // PA_CL_VPORT_YOFFSET_3 + 0x00000000, // PA_CL_VPORT_ZSCALE_3 + 0x00000000, // PA_CL_VPORT_ZOFFSET_3 + 0x00000000, // PA_CL_VPORT_XSCALE_4 + 0x00000000, // PA_CL_VPORT_XOFFSET_4 + 0x00000000, // PA_CL_VPORT_YSCALE_4 + 0x00000000, // PA_CL_VPORT_YOFFSET_4 + 0x00000000, // PA_CL_VPORT_ZSCALE_4 + 0x00000000, // PA_CL_VPORT_ZOFFSET_4 + 0x00000000, // PA_CL_VPORT_XSCALE_5 + 0x00000000, // PA_CL_VPORT_XOFFSET_5 + 0x00000000, // PA_CL_VPORT_YSCALE_5 + 0x00000000, // PA_CL_VPORT_YOFFSET_5 + 0x00000000, // PA_CL_VPORT_ZSCALE_5 + 0x00000000, // PA_CL_VPORT_ZOFFSET_5 + 0x00000000, // PA_CL_VPORT_XSCALE_6 + 0x00000000, // PA_CL_VPORT_XOFFSET_6 + 0x00000000, // PA_CL_VPORT_YSCALE_6 + 0x00000000, // PA_CL_VPORT_YOFFSET_6 + 0x00000000, // PA_CL_VPORT_ZSCALE_6 + 0x00000000, // PA_CL_VPORT_ZOFFSET_6 + 0x00000000, // PA_CL_VPORT_XSCALE_7 + 0x00000000, // PA_CL_VPORT_XOFFSET_7 + 0x00000000, // PA_CL_VPORT_YSCALE_7 + 0x00000000, // PA_CL_VPORT_YOFFSET_7 + 0x00000000, // PA_CL_VPORT_ZSCALE_7 + 0x00000000, // PA_CL_VPORT_ZOFFSET_7 + 0x00000000, // PA_CL_VPORT_XSCALE_8 + 0x00000000, // PA_CL_VPORT_XOFFSET_8 + 0x00000000, // PA_CL_VPORT_YSCALE_8 + 0x00000000, // PA_CL_VPORT_YOFFSET_8 + 0x00000000, // PA_CL_VPORT_ZSCALE_8 + 0x00000000, // PA_CL_VPORT_ZOFFSET_8 + 0x00000000, // PA_CL_VPORT_XSCALE_9 + 0x00000000, // PA_CL_VPORT_XOFFSET_9 + 0x00000000, // PA_CL_VPORT_YSCALE_9 + 0x00000000, // PA_CL_VPORT_YOFFSET_9 + 0x00000000, // PA_CL_VPORT_ZSCALE_9 + 0x00000000, // PA_CL_VPORT_ZOFFSET_9 + 0x00000000, // PA_CL_VPORT_XSCALE_10 + 0x00000000, // PA_CL_VPORT_XOFFSET_10 + 0x00000000, // PA_CL_VPORT_YSCALE_10 + 0x00000000, // PA_CL_VPORT_YOFFSET_10 + 0x00000000, // PA_CL_VPORT_ZSCALE_10 + 0x00000000, // PA_CL_VPORT_ZOFFSET_10 + 0x00000000, // PA_CL_VPORT_XSCALE_11 + 0x00000000, // PA_CL_VPORT_XOFFSET_11 + 0x00000000, // PA_CL_VPORT_YSCALE_11 + 0x00000000, // PA_CL_VPORT_YOFFSET_11 + 0x00000000, // PA_CL_VPORT_ZSCALE_11 + 0x00000000, // PA_CL_VPORT_ZOFFSET_11 + 0x00000000, // PA_CL_VPORT_XSCALE_12 + 0x00000000, // PA_CL_VPORT_XOFFSET_12 + 0x00000000, // PA_CL_VPORT_YSCALE_12 + 0x00000000, // PA_CL_VPORT_YOFFSET_12 + 0x00000000, // PA_CL_VPORT_ZSCALE_12 + 0x00000000, // PA_CL_VPORT_ZOFFSET_12 + 0x00000000, // PA_CL_VPORT_XSCALE_13 + 0x00000000, // PA_CL_VPORT_XOFFSET_13 + 0x00000000, // PA_CL_VPORT_YSCALE_13 + 0x00000000, // PA_CL_VPORT_YOFFSET_13 + 0x00000000, // PA_CL_VPORT_ZSCALE_13 + 0x00000000, // PA_CL_VPORT_ZOFFSET_13 + 0x00000000, // PA_CL_VPORT_XSCALE_14 + 0x00000000, // PA_CL_VPORT_XOFFSET_14 + 0x00000000, // PA_CL_VPORT_YSCALE_14 + 0x00000000, // PA_CL_VPORT_YOFFSET_14 + 0x00000000, // PA_CL_VPORT_ZSCALE_14 + 0x00000000, // PA_CL_VPORT_ZOFFSET_14 + 0x00000000, // PA_CL_VPORT_XSCALE_15 + 0x00000000, // PA_CL_VPORT_XOFFSET_15 + 0x00000000, // PA_CL_VPORT_YSCALE_15 + 
0x00000000, // PA_CL_VPORT_YOFFSET_15 + 0x00000000, // PA_CL_VPORT_ZSCALE_15 + 0x00000000, // PA_CL_VPORT_ZOFFSET_15 + 0x00000000, // PA_CL_UCP_0_X + 0x00000000, // PA_CL_UCP_0_Y + 0x00000000, // PA_CL_UCP_0_Z + 0x00000000, // PA_CL_UCP_0_W + 0x00000000, // PA_CL_UCP_1_X + 0x00000000, // PA_CL_UCP_1_Y + 0x00000000, // PA_CL_UCP_1_Z + 0x00000000, // PA_CL_UCP_1_W + 0x00000000, // PA_CL_UCP_2_X + 0x00000000, // PA_CL_UCP_2_Y + 0x00000000, // PA_CL_UCP_2_Z + 0x00000000, // PA_CL_UCP_2_W + 0x00000000, // PA_CL_UCP_3_X + 0x00000000, // PA_CL_UCP_3_Y + 0x00000000, // PA_CL_UCP_3_Z + 0x00000000, // PA_CL_UCP_3_W + 0x00000000, // PA_CL_UCP_4_X + 0x00000000, // PA_CL_UCP_4_Y + 0x00000000, // PA_CL_UCP_4_Z + 0x00000000, // PA_CL_UCP_4_W + 0x00000000, // PA_CL_UCP_5_X + 0x00000000, // PA_CL_UCP_5_Y + 0x00000000, // PA_CL_UCP_5_Z + 0x00000000, // PA_CL_UCP_5_W + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_CNTL_0 + 0x00000000, // SPI_PS_INPUT_CNTL_1 + 0x00000000, // SPI_PS_INPUT_CNTL_2 + 0x00000000, // SPI_PS_INPUT_CNTL_3 + 0x00000000, // SPI_PS_INPUT_CNTL_4 + 0x00000000, // SPI_PS_INPUT_CNTL_5 + 0x00000000, // SPI_PS_INPUT_CNTL_6 + 0x00000000, // SPI_PS_INPUT_CNTL_7 + 0x00000000, // SPI_PS_INPUT_CNTL_8 + 0x00000000, // SPI_PS_INPUT_CNTL_9 + 0x00000000, // SPI_PS_INPUT_CNTL_10 + 0x00000000, // SPI_PS_INPUT_CNTL_11 + 0x00000000, // SPI_PS_INPUT_CNTL_12 + 0x00000000, // SPI_PS_INPUT_CNTL_13 + 0x00000000, // SPI_PS_INPUT_CNTL_14 + 0x00000000, // SPI_PS_INPUT_CNTL_15 + 0x00000000, // SPI_PS_INPUT_CNTL_16 + 0x00000000, // SPI_PS_INPUT_CNTL_17 + 0x00000000, // SPI_PS_INPUT_CNTL_18 + 0x00000000, // SPI_PS_INPUT_CNTL_19 + 0x00000000, // SPI_PS_INPUT_CNTL_20 + 0x00000000, // SPI_PS_INPUT_CNTL_21 + 0x00000000, // SPI_PS_INPUT_CNTL_22 + 0x00000000, // SPI_PS_INPUT_CNTL_23 + 0x00000000, // SPI_PS_INPUT_CNTL_24 + 0x00000000, // SPI_PS_INPUT_CNTL_25 + 0x00000000, // SPI_PS_INPUT_CNTL_26 + 0x00000000, // SPI_PS_INPUT_CNTL_27 + 0x00000000, // SPI_PS_INPUT_CNTL_28 + 0x00000000, // SPI_PS_INPUT_CNTL_29 + 0x00000000, // SPI_PS_INPUT_CNTL_30 + 0x00000000, // SPI_PS_INPUT_CNTL_31 + 0x00000000, // SPI_VS_OUT_CONFIG + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_ENA + 0x00000000, // SPI_PS_INPUT_ADDR + 0x00000000, // SPI_INTERP_CONTROL_0 + 0x00000002, // SPI_PS_IN_CONTROL + 0, // HOLE + 0x00000000, // SPI_BARYC_CNTL + 0, // HOLE + 0x00000000, // SPI_TMPRING_SIZE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_SHADER_POS_FORMAT + 0x00000000, // SPI_SHADER_Z_FORMAT + 0x00000000, // SPI_SHADER_COL_FORMAT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_BLEND0_CONTROL + 0x00000000, // CB_BLEND1_CONTROL + 0x00000000, // CB_BLEND2_CONTROL + 0x00000000, // CB_BLEND3_CONTROL + 0x00000000, // CB_BLEND4_CONTROL + 0x00000000, // CB_BLEND5_CONTROL + 0x00000000, // CB_BLEND6_CONTROL + 0x00000000, // CB_BLEND7_CONTROL +}; +static const unsigned int ci_SECT_CONTEXT_def_3[] = +{ + 0x00000000, // PA_CL_POINT_X_RAD + 0x00000000, // PA_CL_POINT_Y_RAD + 0x00000000, // PA_CL_POINT_SIZE + 0x00000000, // PA_CL_POINT_CULL_RAD + 0x00000000, // VGT_DMA_BASE_HI + 
0x00000000, // VGT_DMA_BASE +}; +static const unsigned int ci_SECT_CONTEXT_def_4[] = +{ + 0x00000000, // DB_DEPTH_CONTROL + 0x00000000, // DB_EQAA + 0x00000000, // CB_COLOR_CONTROL + 0x00000000, // DB_SHADER_CONTROL + 0x00090000, // PA_CL_CLIP_CNTL + 0x00000004, // PA_SU_SC_MODE_CNTL + 0x00000000, // PA_CL_VTE_CNTL + 0x00000000, // PA_CL_VS_OUT_CNTL + 0x00000000, // PA_CL_NANINF_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_SCALE + 0x00000000, // PA_SU_PRIM_FILTER_CNTL + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SU_POINT_SIZE + 0x00000000, // PA_SU_POINT_MINMAX + 0x00000000, // PA_SU_LINE_CNTL + 0x00000000, // PA_SC_LINE_STIPPLE + 0x00000000, // VGT_OUTPUT_PATH_CNTL + 0x00000000, // VGT_HOS_CNTL + 0x00000000, // VGT_HOS_MAX_TESS_LEVEL + 0x00000000, // VGT_HOS_MIN_TESS_LEVEL + 0x00000000, // VGT_HOS_REUSE_DEPTH + 0x00000000, // VGT_GROUP_PRIM_TYPE + 0x00000000, // VGT_GROUP_FIRST_DECR + 0x00000000, // VGT_GROUP_DECR + 0x00000000, // VGT_GROUP_VECT_0_CNTL + 0x00000000, // VGT_GROUP_VECT_1_CNTL + 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL + 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL + 0x00000000, // VGT_GS_MODE + 0x00000000, // VGT_GS_ONCHIP_CNTL + 0x00000000, // PA_SC_MODE_CNTL_0 + 0x00000000, // PA_SC_MODE_CNTL_1 + 0x00000000, // VGT_ENHANCE + 0x00000100, // VGT_GS_PER_ES + 0x00000080, // VGT_ES_PER_GS + 0x00000002, // VGT_GS_PER_VS + 0x00000000, // VGT_GSVS_RING_OFFSET_1 + 0x00000000, // VGT_GSVS_RING_OFFSET_2 + 0x00000000, // VGT_GSVS_RING_OFFSET_3 + 0x00000000, // VGT_GS_OUT_PRIM_TYPE + 0x00000000, // IA_ENHANCE +}; +static const unsigned int ci_SECT_CONTEXT_def_5[] = +{ + 0x00000000, // WD_ENHANCE + 0x00000000, // VGT_PRIMITIVEID_EN +}; +static const unsigned int ci_SECT_CONTEXT_def_6[] = +{ + 0x00000000, // VGT_PRIMITIVEID_RESET +}; +static const unsigned int ci_SECT_CONTEXT_def_7[] = +{ + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_INSTANCE_STEP_RATE_0 + 0x00000000, // VGT_INSTANCE_STEP_RATE_1 + 0x000000ff, // IA_MULTI_VGT_PARAM + 0x00000000, // 
VGT_ESGS_RING_ITEMSIZE + 0x00000000, // VGT_GSVS_RING_ITEMSIZE + 0x00000000, // VGT_REUSE_OFF + 0x00000000, // VGT_VTX_CNT_EN + 0x00000000, // DB_HTILE_SURFACE + 0x00000000, // DB_SRESULTS_COMPARE_STATE0 + 0x00000000, // DB_SRESULTS_COMPARE_STATE1 + 0x00000000, // DB_PRELOAD_CONTROL + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE + 0, // HOLE + 0x00000000, // VGT_GS_MAX_VERT_OUT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_SHADER_STAGES_EN + 0x00000000, // VGT_LS_HS_CONFIG + 0x00000000, // VGT_GS_VERT_ITEMSIZE + 0x00000000, // VGT_GS_VERT_ITEMSIZE_1 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_2 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_3 + 0x00000000, // VGT_TF_PARAM + 0x00000000, // DB_ALPHA_TO_MASK + 0, // HOLE + 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL + 0x00000000, // PA_SU_POLY_OFFSET_CLAMP + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET + 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET + 0x00000000, // VGT_GS_INSTANCE_CNT + 0x00000000, // VGT_STRMOUT_CONFIG + 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SC_CENTROID_PRIORITY_0 + 0x00000000, // PA_SC_CENTROID_PRIORITY_1 + 0x00001000, // PA_SC_LINE_CNTL + 0x00000000, // PA_SC_AA_CONFIG + 0x00000005, // PA_SU_VTX_CNTL + 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ + 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ + 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ + 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 + 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0 + 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL + 0x00000010, // VGT_OUT_DEALLOC_CNTL + 0x00000000, // CB_COLOR0_BASE 
+ 0x00000000, // CB_COLOR0_PITCH + 0x00000000, // CB_COLOR0_SLICE + 0x00000000, // CB_COLOR0_VIEW + 0x00000000, // CB_COLOR0_INFO + 0x00000000, // CB_COLOR0_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR0_CMASK + 0x00000000, // CB_COLOR0_CMASK_SLICE + 0x00000000, // CB_COLOR0_FMASK + 0x00000000, // CB_COLOR0_FMASK_SLICE + 0x00000000, // CB_COLOR0_CLEAR_WORD0 + 0x00000000, // CB_COLOR0_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR1_BASE + 0x00000000, // CB_COLOR1_PITCH + 0x00000000, // CB_COLOR1_SLICE + 0x00000000, // CB_COLOR1_VIEW + 0x00000000, // CB_COLOR1_INFO + 0x00000000, // CB_COLOR1_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR1_CMASK + 0x00000000, // CB_COLOR1_CMASK_SLICE + 0x00000000, // CB_COLOR1_FMASK + 0x00000000, // CB_COLOR1_FMASK_SLICE + 0x00000000, // CB_COLOR1_CLEAR_WORD0 + 0x00000000, // CB_COLOR1_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR2_BASE + 0x00000000, // CB_COLOR2_PITCH + 0x00000000, // CB_COLOR2_SLICE + 0x00000000, // CB_COLOR2_VIEW + 0x00000000, // CB_COLOR2_INFO + 0x00000000, // CB_COLOR2_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR2_CMASK + 0x00000000, // CB_COLOR2_CMASK_SLICE + 0x00000000, // CB_COLOR2_FMASK + 0x00000000, // CB_COLOR2_FMASK_SLICE + 0x00000000, // CB_COLOR2_CLEAR_WORD0 + 0x00000000, // CB_COLOR2_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR3_BASE + 0x00000000, // CB_COLOR3_PITCH + 0x00000000, // CB_COLOR3_SLICE + 0x00000000, // CB_COLOR3_VIEW + 0x00000000, // CB_COLOR3_INFO + 0x00000000, // CB_COLOR3_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR3_CMASK + 0x00000000, // CB_COLOR3_CMASK_SLICE + 0x00000000, // CB_COLOR3_FMASK + 0x00000000, // CB_COLOR3_FMASK_SLICE + 0x00000000, // CB_COLOR3_CLEAR_WORD0 + 0x00000000, // CB_COLOR3_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR4_BASE + 0x00000000, // CB_COLOR4_PITCH + 0x00000000, // CB_COLOR4_SLICE + 0x00000000, // CB_COLOR4_VIEW + 0x00000000, // CB_COLOR4_INFO + 0x00000000, // CB_COLOR4_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR4_CMASK + 0x00000000, // CB_COLOR4_CMASK_SLICE + 0x00000000, // CB_COLOR4_FMASK + 0x00000000, // CB_COLOR4_FMASK_SLICE + 0x00000000, // CB_COLOR4_CLEAR_WORD0 + 0x00000000, // CB_COLOR4_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR5_BASE + 0x00000000, // CB_COLOR5_PITCH + 0x00000000, // CB_COLOR5_SLICE + 0x00000000, // CB_COLOR5_VIEW + 0x00000000, // CB_COLOR5_INFO + 0x00000000, // CB_COLOR5_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR5_CMASK + 0x00000000, // CB_COLOR5_CMASK_SLICE + 0x00000000, // CB_COLOR5_FMASK + 0x00000000, // CB_COLOR5_FMASK_SLICE + 0x00000000, // CB_COLOR5_CLEAR_WORD0 + 0x00000000, // CB_COLOR5_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR6_BASE + 0x00000000, // CB_COLOR6_PITCH + 0x00000000, // CB_COLOR6_SLICE + 0x00000000, // CB_COLOR6_VIEW + 0x00000000, // CB_COLOR6_INFO + 0x00000000, // CB_COLOR6_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR6_CMASK + 0x00000000, // CB_COLOR6_CMASK_SLICE + 0x00000000, // CB_COLOR6_FMASK + 0x00000000, // CB_COLOR6_FMASK_SLICE + 0x00000000, // CB_COLOR6_CLEAR_WORD0 + 0x00000000, // CB_COLOR6_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR7_BASE + 0x00000000, // CB_COLOR7_PITCH + 0x00000000, // CB_COLOR7_SLICE + 0x00000000, // CB_COLOR7_VIEW + 0x00000000, // CB_COLOR7_INFO + 0x00000000, // CB_COLOR7_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR7_CMASK + 0x00000000, // CB_COLOR7_CMASK_SLICE + 0x00000000, // CB_COLOR7_FMASK + 0x00000000, // CB_COLOR7_FMASK_SLICE + 0x00000000, // 
CB_COLOR7_CLEAR_WORD0 + 0x00000000, // CB_COLOR7_CLEAR_WORD1 +}; +static const struct cs_extent_def ci_SECT_CONTEXT_defs[] = +{ + {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 }, + {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 }, + {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 }, + {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 }, + {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 }, + {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, + {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, + { 0, 0, 0 } +}; +static const struct cs_section_def ci_cs_data[] = { + { ci_SECT_CONTEXT_defs, SECT_CONTEXT }, + { 0, SECT_NONE } +}; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 6fc876a444d4..2ce12ee3e67f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -140,6 +140,7 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl); extern void cayman_vm_decode_fault(struct radeon_device *rdev, u32 status, u32 addr); +void cik_init_cp_pg_table(struct radeon_device *rdev); static const u32 evergreen_golden_registers[] = { @@ -3893,8 +3894,22 @@ void sumo_rlc_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->rlc.clear_state_obj); rdev->rlc.clear_state_obj = NULL; } + + /* clear state block */ + if (rdev->rlc.cp_table_obj) { + r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); + radeon_bo_unpin(rdev->rlc.cp_table_obj); + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + + radeon_bo_unref(&rdev->rlc.cp_table_obj); + rdev->rlc.cp_table_obj = NULL; + } } +#define CP_ME_TABLE_SIZE 96 + int sumo_rlc_init(struct radeon_device *rdev) { const u32 *src_ptr; @@ -3980,9 +3995,10 @@ int sumo_rlc_init(struct radeon_device *rdev) } reg_list_blk_index = (3 * reg_list_num + 2); dws += reg_list_blk_index; + rdev->rlc.clear_state_size = dws; if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, + r = radeon_bo_create(rdev, rdev->rlc.clear_state_size * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); @@ -4046,6 +4062,45 @@ int sumo_rlc_init(struct radeon_device *rdev) radeon_bo_unreserve(rdev->rlc.clear_state_obj); } + if (rdev->rlc.cp_table_size) { + if (rdev->rlc.cp_table_obj == NULL) { + r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + } + + r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.cp_table_gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + + cik_init_cp_pg_table(rdev); + + radeon_bo_kunmap(rdev->rlc.cp_table_obj); + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b9706e83e827..5941ada063d1 100644 --- 
a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,14 @@ extern int radeon_aspm;
 #define RADEON_RESET_MC (1 << 10)
 #define RADEON_RESET_DISPLAY (1 << 11)
 
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX (1 << 0)
+#define RADEON_CG_BLOCK_MC (1 << 1)
+#define RADEON_CG_BLOCK_SDMA (1 << 2)
+#define RADEON_CG_BLOCK_UVD (1 << 3)
+#define RADEON_CG_BLOCK_VCE (1 << 4)
+#define RADEON_CG_BLOCK_HDP (1 << 5)
+
 /* max cursor sizes (in pixels) */
 #define CURSOR_WIDTH 64
 #define CURSOR_HEIGHT 64
@@ -861,6 +869,12 @@ struct radeon_rlc {
         uint64_t clear_state_gpu_addr;
         volatile uint32_t *cs_ptr;
         const struct cs_section_def *cs_data;
+        u32 clear_state_size;
+        /* for cp tables */
+        struct radeon_bo *cp_table_obj;
+        uint64_t cp_table_gpu_addr;
+        volatile uint32_t *cp_table_ptr;
+        u32 cp_table_size;
 };
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 1926ec06a638..880551b6df61 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2780,6 +2780,7 @@ int radeon_asic_init(struct radeon_device *rdev)
         case CHIP_BONAIRE:
                 rdev->asic = &ci_asic;
                 rdev->num_crtc = 6;
+                rdev->has_uvd = true;
                 break;
         case CHIP_KAVERI:
         case CHIP_KABINI:
@@ -2789,6 +2790,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                         rdev->num_crtc = 4;
                 else
                         rdev->num_crtc = 2;
+                rdev->has_uvd = true;
                 break;
         default:
                 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8b8963d4a732..4f91e1f4d814 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4901,7 +4901,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
         WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
 }
 
-static void si_init_uvd_internal_cg(struct radeon_device *rdev)
+void si_init_uvd_internal_cg(struct radeon_device *rdev)
 {
         bool hw_mode = true;
 
-- cgit v1.2.3

From 1d58234d5e0a2475f1be78d1c05146a3af4f875d Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 19 Apr 2013 13:03:37 -0400
Subject: drm/radeon: add indirect accessors for DIDT registers on CIK

Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/radeon/cik_reg.h |  3 +++
 drivers/gpu/drm/radeon/radeon.h  | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f5..ca1bb6133580 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
 #ifndef __CIK_REG_H__
 #define __CIK_REG_H__
 
+#define CIK_DIDT_IND_INDEX 0xca00
+#define CIK_DIDT_IND_DATA 0xca04
+
 #define CIK_DC_GPIO_HPD_MASK 0x65b0
 #define CIK_DC_GPIO_HPD_A 0x65b4
 #define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5941ada063d1..abf52687b309 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2141,6 +2141,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
 #define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
 #define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
 #define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
 #define WREG32_P(reg, val, mask) \
         do { \
                 uint32_t tmp_ = RREG32(reg); \
@@ -2272,6 +2274,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
         WREG32(R600_UVD_CTX_DATA, (v));
 }
+
+static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+        u32 r;
+
+        WREG32(CIK_DIDT_IND_INDEX, (reg));
+        r = RREG32(CIK_DIDT_IND_DATA);
+        return r;
+}
+
+static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+        WREG32(CIK_DIDT_IND_INDEX, (reg));
+        WREG32(CIK_DIDT_IND_DATA, (v));
+}
+
 void r100_pll_errata_after_index(struct radeon_device *rdev);
-- cgit v1.2.3

From 03243fc6568b3fa02de8de05daa02f021f16c3e4 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 19 Apr 2013 13:29:43 -0400
Subject: drm/radeon/sumo: add helper to go from vid7 to vid2

Needed for DPM on KB/KV.

Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/radeon/sumo_dpm.c | 14 ++++++++++++++
 drivers/gpu/drm/radeon/sumo_dpm.h |  3 +++
 2 files changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a850319908..2cefe59ef586 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1530,6 +1530,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
         return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
 }
 
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+                              struct sumo_vid_mapping_table *vid_mapping_table,
+                              u32 vid_7bit)
+{
+        u32 i;
+
+        for (i = 0; i < vid_mapping_table->num_entries; i++) {
+                if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+                        return vid_mapping_table->entries[i].vid_2bit;
+        }
+
+        return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+}
+
 static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
                                                u32 vid_2bit)
 {
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c784..db1ea32a907b 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
 u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
                               struct sumo_vid_mapping_table *vid_mapping_table,
                               u32 vid_2bit);
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+                              struct sumo_vid_mapping_table *vid_mapping_table,
+                              u32 vid_7bit);
 u32 sumo_get_sleep_divider_from_id(u32 id);
 u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
                                          u32 sclk,
-- cgit v1.2.3

From f7466e6ca084e3b53d8f33c71af1e0e1b776a7b0 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 19 Apr 2013 18:58:44 -0400
Subject: drm/radeon: switch to pptable.h

Internally we switched to using a separate header for atombios pplib definitions. Switch over the open source driver.
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios.h | 615 +--------------------------------- drivers/gpu/drm/radeon/pptable.h | 682 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 685 insertions(+), 612 deletions(-) create mode 100644 drivers/gpu/drm/radeon/pptable.h diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 16b120c3f144..af10f8571d87 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; }ATOM_POWERPLAY_INFO_V3; -/* New PPlib */ -/**************************************************************************/ -typedef struct _ATOM_PPLIB_THERMALCONTROLLER - -{ - UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* - UCHAR ucI2cLine; // as interpreted by DAL I2C - UCHAR ucI2cAddress; - UCHAR ucFanParameters; // Fan Control Parameters. - UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. - UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. - UCHAR ucReserved; // ---- - UCHAR ucFlags; // to be defined -} ATOM_PPLIB_THERMALCONTROLLER; - -#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f -#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. - -#define ATOM_PP_THERMALCONTROLLER_NONE 0 -#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_LM64 5 -#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 -#define ATOM_PP_THERMALCONTROLLER_RV770 8 -#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 -#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 -#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 -#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. -#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally -#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 -#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 -#define ATOM_PP_THERMALCONTROLLER_LM96163 17 -#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18 - -// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. -// We probably should reserve the bit 0x80 for this use. -// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). -// The driver can pick the correct internal controller based on the ASIC. - -#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller -#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller - -typedef struct _ATOM_PPLIB_STATE -{ - UCHAR ucNonClockStateIndex; - UCHAR ucClockStateIndices[1]; // variable-sized -} ATOM_PPLIB_STATE; - - -typedef struct _ATOM_PPLIB_FANTABLE -{ - UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. - UCHAR ucTHyst; // Temperature hysteresis. Integer. - USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. 
- USHORT usTMed; // The middle temperature where we change slopes. - USHORT usTHigh; // The high point above TMed for adjusting the second slope. - USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). - USHORT usPWMMed; // The PWM value (in percent) at TMed. - USHORT usPWMHigh; // The PWM value at THigh. -} ATOM_PPLIB_FANTABLE; - -typedef struct _ATOM_PPLIB_FANTABLE2 -{ - ATOM_PPLIB_FANTABLE basicTable; - USHORT usTMax; // The max temperature -} ATOM_PPLIB_FANTABLE2; - -typedef struct _ATOM_PPLIB_EXTENDEDHEADER -{ - USHORT usSize; - ULONG ulMaxEngineClock; // For Overdrive. - ULONG ulMaxMemoryClock; // For Overdrive. - // Add extra system parameters here, always adjust size to include all fields. - USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table - USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table - USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table - USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table -} ATOM_PPLIB_EXTENDEDHEADER; - -//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps -#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 -#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 -#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4 -#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8 -#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16 -#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32 -#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64 -#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128 -#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256 -#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 -#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 -#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 -#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 -#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. -#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). -#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. -#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. -#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. -#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table. -#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity. -#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17. -#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable. - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - - UCHAR ucDataRevision; - - UCHAR ucNumStates; - UCHAR ucStateEntrySize; - UCHAR ucClockInfoSize; - UCHAR ucNonClockSize; - - // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures - USHORT usStateArrayOffset; - - // offset from start of this table to array of ASIC-specific structures, - // currently ATOM_PPLIB_CLOCK_INFO. 
- USHORT usClockInfoArrayOffset; - - // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO - USHORT usNonClockInfoArrayOffset; - - USHORT usBackbiasTime; // in microseconds - USHORT usVoltageTime; // in microseconds - USHORT usTableSize; //the size of this structure, or the extended structure - - ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_* - - ATOM_PPLIB_THERMALCONTROLLER sThermalController; - - USHORT usBootClockInfoOffset; - USHORT usBootNonClockInfoOffset; - -} ATOM_PPLIB_POWERPLAYTABLE; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 -{ - ATOM_PPLIB_POWERPLAYTABLE basicTable; - UCHAR ucNumCustomThermalPolicy; - USHORT usCustomThermalPolicyArrayOffset; -}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 -{ - ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; - USHORT usFormatID; // To be used ONLY by PPGen. - USHORT usFanTableOffset; - USHORT usExtendendedHeaderOffset; -} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 -{ - ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; - ULONG ulGoldenPPID; // PPGen use only - ULONG ulGoldenRevision; // PPGen use only - USHORT usVddcDependencyOnSCLKOffset; - USHORT usVddciDependencyOnMCLKOffset; - USHORT usVddcDependencyOnMCLKOffset; - USHORT usMaxClockVoltageOnDCOffset; - USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table - USHORT usMvddDependencyOnMCLKOffset; -} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 -{ - ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; - ULONG ulTDPLimit; - ULONG ulNearTDPLimit; - ULONG ulSQRampingThreshold; - USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table - ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table - USHORT usTDPODLimit; - USHORT usLoadLineSlope; // in milliOhms * 100 -} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; - -//// ATOM_PPLIB_NONCLOCK_INFO::usClassification -#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 -#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 -#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 -#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 -#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 -#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 -// 2, 4, 6, 7 are reserved - -#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 -#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 -#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 -#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 -#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 -#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 -#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 -#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 -#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 -#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 -#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 -#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 -#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 - -//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 -#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 -#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 -#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D) - -//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings -#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 -#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 - -// 0 is 2.5Gb/s, 1 is 5Gb/s -#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 -#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 - -// 
lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec -#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8 -#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 - -// lookup into reduced refresh-rate table -#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 -#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 - -#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 -#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 -// 2-15 TBD as needed. - -#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 -#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 - -#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 - -#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 - -//memory related flags -#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 - -//M3 Arb //2bits, current 3 sets of parameters in total -#define ATOM_PPLIB_M3ARB_MASK 0x00060000 -#define ATOM_PPLIB_M3ARB_SHIFT 17 - -#define ATOM_PPLIB_ENABLE_DRR 0x00080000 - -// remaining 16 bits are reserved -typedef struct _ATOM_PPLIB_THERMAL_STATE -{ - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucThermalAction; -}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; - -// Contained in an array starting at the offset -// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. -// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex -#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 -#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 -typedef struct _ATOM_PPLIB_NONCLOCK_INFO -{ - USHORT usClassification; - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - ULONG ulCapsAndSettings; - UCHAR ucRequiredPower; - USHORT usClassification2; - ULONG ulVCLK; - ULONG ulDCLK; - UCHAR ucUnused[5]; -} ATOM_PPLIB_NONCLOCK_INFO; - -// Contained in an array starting at the offset -// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. -// referenced from ATOM_PPLIB_STATE::ucClockStateIndices -typedef struct _ATOM_PPLIB_R600_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usUnused1; - USHORT usUnused2; - - ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* - -} ATOM_PPLIB_R600_CLOCK_INFO; - -// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO -#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1 -#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2 -#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 -#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 -#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 -#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). - -typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usVDDCI; - USHORT usUnused; - - ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* - -} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_SI_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usVDDCI; - UCHAR ucPCIEGen; - UCHAR ucUnused1; - - ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now - -} ATOM_PPLIB_SI_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_CI_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - UCHAR ucPCIEGen; - USHORT usPCIELane; -} ATOM_PPLIB_CI_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO - -{ - USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600). 
- UCHAR ucLowEngineClockHigh; - USHORT usHighEngineClockLow; // High Engine clock in MHz. - UCHAR ucHighEngineClockHigh; - USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants. - UCHAR ucMemoryClockHigh; // Currentyl unused. - UCHAR ucPadding; // For proper alignment and size. - USHORT usVDDC; // For the 780, use: None, Low, High, Variable - UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} - UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. - USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). - ULONG ulFlags; -} ATOM_PPLIB_RS780_CLOCK_INFO; - -#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 -#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 -#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 -#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 - -#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. -#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 -#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 - -#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 -#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 -#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 - -typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ - USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz - UCHAR ucEngineClockHigh; //clockfrequency >> 16. - UCHAR vddcIndex; //2-bit vddc index; - USHORT tdpLimit; - //please initalize to 0 - USHORT rsv1; - //please initialize to 0s - ULONG rsv2[2]; -}ATOM_PPLIB_SUMO_CLOCK_INFO; - - - -typedef struct _ATOM_PPLIB_STATE_V2 -{ - //number of valid dpm levels in this state; Driver uses it to calculate the whole - //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) - UCHAR ucNumDPMLevels; - - //a index to the array of nonClockInfos - UCHAR nonClockInfoIndex; - /** - * Driver will read the first ucNumDPMLevels in this array - */ - UCHAR clockInfoIndex[1]; -} ATOM_PPLIB_STATE_V2; - -typedef struct _StateArray{ - //how many states we have - UCHAR ucNumEntries; - - ATOM_PPLIB_STATE_V2 states[1]; -}StateArray; - - -typedef struct _ClockInfoArray{ - //how many clock levels we have - UCHAR ucNumEntries; - - //sizeof(ATOM_PPLIB_CLOCK_INFO) - UCHAR ucEntrySize; - - UCHAR clockInfo[1]; -}ClockInfoArray; - -typedef struct _NonClockInfoArray{ - - //how many non-clock levels we have. normally should be same as number of states - UCHAR ucNumEntries; - //sizeof(ATOM_PPLIB_NONCLOCK_INFO) - UCHAR ucEntrySize; - - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; -}NonClockInfoArray; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record -{ - USHORT usClockLow; - UCHAR ucClockHigh; - USHORT usVoltage; -}ATOM_PPLIB_Clock_Voltage_Dependency_Record; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_Clock_Voltage_Dependency_Table; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record -{ - USHORT usSclkLow; - UCHAR ucSclkHigh; - USHORT usMclkLow; - UCHAR ucMclkHigh; - USHORT usVddc; - USHORT usVddci; -}ATOM_PPLIB_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. 
-}ATOM_PPLIB_Clock_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_CAC_Leakage_Record -{ - USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value. - ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standar VDDC value. -}ATOM_PPLIB_CAC_Leakage_Record; - -typedef struct _ATOM_PPLIB_CAC_Leakage_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_CAC_Leakage_Table; - -typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record -{ - USHORT usVoltage; - USHORT usSclkLow; - UCHAR ucSclkHigh; - USHORT usMclkLow; - UCHAR ucMclkHigh; -}ATOM_PPLIB_PhaseSheddingLimits_Record; - -typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_PhaseSheddingLimits_Table; - -typedef struct _VCEClockInfo{ - USHORT usEVClkLow; - UCHAR ucEVClkHigh; - USHORT usECClkLow; - UCHAR ucECClkHigh; -}VCEClockInfo; - -typedef struct _VCEClockInfoArray{ - UCHAR ucNumEntries; - VCEClockInfo entries[1]; -}VCEClockInfoArray; - -typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record -{ - USHORT usVoltage; - UCHAR ucVCEClockInfoIndex; -}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table -{ - UCHAR numEntries; - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_VCE_State_Record -{ - UCHAR ucVCEClockInfoIndex; - UCHAR ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary -}ATOM_PPLIB_VCE_State_Record; - -typedef struct _ATOM_PPLIB_VCE_State_Table -{ - UCHAR numEntries; - ATOM_PPLIB_VCE_State_Record entries[1]; -}ATOM_PPLIB_VCE_State_Table; - - -typedef struct _ATOM_PPLIB_VCE_Table -{ - UCHAR revid; -// VCEClockInfoArray array; -// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits; -// ATOM_PPLIB_VCE_State_Table states; -}ATOM_PPLIB_VCE_Table; - - -typedef struct _UVDClockInfo{ - USHORT usVClkLow; - UCHAR ucVClkHigh; - USHORT usDClkLow; - UCHAR ucDClkHigh; -}UVDClockInfo; - -typedef struct _UVDClockInfoArray{ - UCHAR ucNumEntries; - UVDClockInfo entries[1]; -}UVDClockInfoArray; - -typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record -{ - USHORT usVoltage; - UCHAR ucUVDClockInfoIndex; -}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table -{ - UCHAR numEntries; - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_UVD_State_Record -{ - UCHAR ucUVDClockInfoIndex; - UCHAR ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary -}ATOM_PPLIB_UVD_State_Record; - -typedef struct _ATOM_PPLIB_UVD_State_Table -{ - UCHAR numEntries; - ATOM_PPLIB_UVD_State_Record entries[1]; -}ATOM_PPLIB_UVD_State_Table; - - -typedef struct _ATOM_PPLIB_UVD_Table -{ - UCHAR revid; -// UVDClockInfoArray array; -// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits; -// ATOM_PPLIB_UVD_State_Table states; -}ATOM_PPLIB_UVD_Table; - - -typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record -{ - USHORT usVoltage; - USHORT usSAMClockLow; - UCHAR ucSAMClockHigh; -}ATOM_PPLIB_SAMClk_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ - UCHAR 
numEntries; - ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_SAMClk_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_SAMU_Table -{ - UCHAR revid; - ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits; -}ATOM_PPLIB_SAMU_Table; - -#define ATOM_PPM_A_A 1 -#define ATOM_PPM_A_I 2 -typedef struct _ATOM_PPLIB_PPM_Table -{ - UCHAR ucRevId; - UCHAR ucPpmDesign; //A+I or A+A - USHORT usCpuCoreNumber; - ULONG ulPlatformTDP; - ULONG ulSmallACPlatformTDP; - ULONG ulPlatformTDC; - ULONG ulSmallACPlatformTDC; - ULONG ulApuTDP; - ULONG ulDGpuTDP; - ULONG ulDGpuUlvPower; - ULONG ulTjmax; -} ATOM_PPLIB_PPM_Table; - -/**************************************************************************/ - // Following definitions are for compatibility issue in different SW components. #define ATOM_MASTER_DATA_TABLE_REVISION 0x01 @@ -8485,3 +7873,6 @@ typedef struct { #endif /* _ATOMBIOS_H */ + +#include "pptable.h" + diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h new file mode 100644 index 000000000000..da43ab328833 --- /dev/null +++ b/drivers/gpu/drm/radeon/pptable.h @@ -0,0 +1,682 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PPTABLE_H +#define _PPTABLE_H + +#pragma pack(push, 1) + +typedef struct _ATOM_PPLIB_THERMALCONTROLLER + +{ + UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* + UCHAR ucI2cLine; // as interpreted by DAL I2C + UCHAR ucI2cAddress; + UCHAR ucFanParameters; // Fan Control Parameters. + UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. + UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. + UCHAR ucReserved; // ---- + UCHAR ucFlags; // to be defined +} ATOM_PPLIB_THERMALCONTROLLER; + +#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. 
+ +#define ATOM_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_LM64 5 +#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 +#define ATOM_PP_THERMALCONTROLLER_RV770 8 +#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 +#define ATOM_PP_THERMALCONTROLLER_KONG 10 +#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 +#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 +#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. +#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally +#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 +#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 +#define ATOM_PP_THERMALCONTROLLER_LM96163 17 +#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18 +#define ATOM_PP_THERMALCONTROLLER_KAVERI 19 + + +// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. +// We probably should reserve the bit 0x80 for this use. +// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). +// The driver can pick the correct internal controller based on the ASIC. + +#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller +#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller + +typedef struct _ATOM_PPLIB_STATE +{ + UCHAR ucNonClockStateIndex; + UCHAR ucClockStateIndices[1]; // variable-sized +} ATOM_PPLIB_STATE; + + +typedef struct _ATOM_PPLIB_FANTABLE +{ + UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. + UCHAR ucTHyst; // Temperature hysteresis. Integer. + USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. + USHORT usTMed; // The middle temperature where we change slopes. + USHORT usTHigh; // The high point above TMed for adjusting the second slope. + USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). + USHORT usPWMMed; // The PWM value (in percent) at TMed. + USHORT usPWMHigh; // The PWM value at THigh. +} ATOM_PPLIB_FANTABLE; + +typedef struct _ATOM_PPLIB_FANTABLE2 +{ + ATOM_PPLIB_FANTABLE basicTable; + USHORT usTMax; // The max temperature +} ATOM_PPLIB_FANTABLE2; + +typedef struct _ATOM_PPLIB_EXTENDEDHEADER +{ + USHORT usSize; + ULONG ulMaxEngineClock; // For Overdrive. + ULONG ulMaxMemoryClock; // For Overdrive. + // Add extra system parameters here, always adjust size to include all fields. 
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table + USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table + USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table + USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table + USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table + USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table +} ATOM_PPLIB_EXTENDEDHEADER; + +//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps +#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 +#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 +#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4 +#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8 +#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16 +#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32 +#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64 +#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128 +#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256 +#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 +#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 +#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 +#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 +#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. +#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). +#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. +#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. +#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. +#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table. +#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity. +#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17. +#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable. +#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver supports Temp Inversion feature. +#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000 + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + + UCHAR ucDataRevision; + + UCHAR ucNumStates; + UCHAR ucStateEntrySize; + UCHAR ucClockInfoSize; + UCHAR ucNonClockSize; + + // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures + USHORT usStateArrayOffset; + + // offset from start of this table to array of ASIC-specific structures, + // currently ATOM_PPLIB_CLOCK_INFO. + USHORT usClockInfoArrayOffset; + + // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO + USHORT usNonClockInfoArrayOffset; + + USHORT usBackbiasTime; // in microseconds + USHORT usVoltageTime; // in microseconds + USHORT usTableSize; //the size of this structure, or the extended structure + + ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_* + + ATOM_PPLIB_THERMALCONTROLLER sThermalController; + + USHORT usBootClockInfoOffset; + USHORT usBootNonClockInfoOffset; + +} ATOM_PPLIB_POWERPLAYTABLE; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 +{ + ATOM_PPLIB_POWERPLAYTABLE basicTable; + UCHAR ucNumCustomThermalPolicy; + USHORT usCustomThermalPolicyArrayOffset; +}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 +{ + ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; + USHORT usFormatID; // To be used ONLY by PPGen. 
+ USHORT usFanTableOffset; + USHORT usExtendendedHeaderOffset; +} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 +{ + ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; + ULONG ulGoldenPPID; // PPGen use only + ULONG ulGoldenRevision; // PPGen use only + USHORT usVddcDependencyOnSCLKOffset; + USHORT usVddciDependencyOnMCLKOffset; + USHORT usVddcDependencyOnMCLKOffset; + USHORT usMaxClockVoltageOnDCOffset; + USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table + USHORT usMvddDependencyOnMCLKOffset; +} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 +{ + ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; + ULONG ulTDPLimit; + ULONG ulNearTDPLimit; + ULONG ulSQRampingThreshold; + USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table + ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table + USHORT usTDPODLimit; + USHORT usLoadLineSlope; // in milliOhms * 100 +} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; + +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +// 2, 4, 6, 7 are reserved + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 +#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 +#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 +#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 +#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 +#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 +#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 + +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 +#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 +#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D) + +//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings +#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 +#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 + +// 0 is 2.5Gb/s, 1 is 5Gb/s +#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 +#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 + +// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec +#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8 +#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 + +// lookup into reduced refresh-rate table +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 + +#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 +// 2-15 TBD as needed. 
+ +#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 +#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 + +#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 + +#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 + +//memory related flags +#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 + +//M3 Arb //2bits, current 3 sets of parameters in total +#define ATOM_PPLIB_M3ARB_MASK 0x00060000 +#define ATOM_PPLIB_M3ARB_SHIFT 17 + +#define ATOM_PPLIB_ENABLE_DRR 0x00080000 + +// remaining 16 bits are reserved +typedef struct _ATOM_PPLIB_THERMAL_STATE +{ + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucThermalAction; +}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; + +// Contained in an array starting at the offset +// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. +// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex +#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 +#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 +typedef struct _ATOM_PPLIB_NONCLOCK_INFO +{ + USHORT usClassification; + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + ULONG ulCapsAndSettings; + UCHAR ucRequiredPower; + USHORT usClassification2; + ULONG ulVCLK; + ULONG ulDCLK; + UCHAR ucUnused[5]; +} ATOM_PPLIB_NONCLOCK_INFO; + +// Contained in an array starting at the offset +// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. +// referenced from ATOM_PPLIB_STATE::ucClockStateIndices +typedef struct _ATOM_PPLIB_R600_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usUnused1; + USHORT usUnused2; + + ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* + +} ATOM_PPLIB_R600_CLOCK_INFO; + +// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO +#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1 +#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2 +#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 +#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 +#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 +#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). + +typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + +{ + USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600). + UCHAR ucLowEngineClockHigh; + USHORT usHighEngineClockLow; // High Engine clock in MHz. + UCHAR ucHighEngineClockHigh; + USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants. + UCHAR ucMemoryClockHigh; // Currentyl unused. + UCHAR ucPadding; // For proper alignment and size. + USHORT usVDDC; // For the 780, use: None, Low, High, Variable + UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} + UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could + USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). + ULONG ulFlags; +} ATOM_PPLIB_RS780_CLOCK_INFO; + +#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 +#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 +#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 +#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 + +#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. 
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 +#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 + +#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 +#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 +#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 + +typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usVDDCI; + USHORT usUnused; + + ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* + +} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; + +typedef struct _ATOM_PPLIB_SI_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usVDDCI; + UCHAR ucPCIEGen; + UCHAR ucUnused1; + + ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now + +} ATOM_PPLIB_SI_CLOCK_INFO; + +typedef struct _ATOM_PPLIB_CI_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + UCHAR ucPCIEGen; + USHORT usPCIELane; +} ATOM_PPLIB_CI_CLOCK_INFO; + +typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ + USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz + UCHAR ucEngineClockHigh; //clockfrequency >> 16. + UCHAR vddcIndex; //2-bit vddc index; + USHORT tdpLimit; + //please initalize to 0 + USHORT rsv1; + //please initialize to 0s + ULONG rsv2[2]; +}ATOM_PPLIB_SUMO_CLOCK_INFO; + +typedef struct _ATOM_PPLIB_STATE_V2 +{ + //number of valid dpm levels in this state; Driver uses it to calculate the whole + //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) + UCHAR ucNumDPMLevels; + + //a index to the array of nonClockInfos + UCHAR nonClockInfoIndex; + /** + * Driver will read the first ucNumDPMLevels in this array + */ + UCHAR clockInfoIndex[1]; +} ATOM_PPLIB_STATE_V2; + +typedef struct _StateArray{ + //how many states we have + UCHAR ucNumEntries; + + ATOM_PPLIB_STATE_V2 states[1]; +}StateArray; + + +typedef struct _ClockInfoArray{ + //how many clock levels we have + UCHAR ucNumEntries; + + //sizeof(ATOM_PPLIB_CLOCK_INFO) + UCHAR ucEntrySize; + + UCHAR clockInfo[1]; +}ClockInfoArray; + +typedef struct _NonClockInfoArray{ + + //how many non-clock levels we have. normally should be same as number of states + UCHAR ucNumEntries; + //sizeof(ATOM_PPLIB_NONCLOCK_INFO) + UCHAR ucEntrySize; + + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; +}NonClockInfoArray; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record +{ + USHORT usClockLow; + UCHAR ucClockHigh; + USHORT usVoltage; +}ATOM_PPLIB_Clock_Voltage_Dependency_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Dependency_Table; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record +{ + USHORT usSclkLow; + UCHAR ucSclkHigh; + USHORT usMclkLow; + UCHAR ucMclkHigh; + USHORT usVddc; + USHORT usVddci; +}ATOM_PPLIB_Clock_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Limit_Table; + +union _ATOM_PPLIB_CAC_Leakage_Record +{ + struct + { + USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value. 
in CI we read it as StdVoltageHiSidd + ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standar VDDC value. in CI we read it as StdVoltageLoSidd + + }; + struct + { + USHORT usVddc1; + USHORT usVddc2; + USHORT usVddc3; + }; +}; + +typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record; + +typedef struct _ATOM_PPLIB_CAC_Leakage_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_CAC_Leakage_Table; + +typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record +{ + USHORT usVoltage; + USHORT usSclkLow; + UCHAR ucSclkHigh; + USHORT usMclkLow; + UCHAR ucMclkHigh; +}ATOM_PPLIB_PhaseSheddingLimits_Record; + +typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_PhaseSheddingLimits_Table; + +typedef struct _VCEClockInfo{ + USHORT usEVClkLow; + UCHAR ucEVClkHigh; + USHORT usECClkLow; + UCHAR ucECClkHigh; +}VCEClockInfo; + +typedef struct _VCEClockInfoArray{ + UCHAR ucNumEntries; + VCEClockInfo entries[1]; +}VCEClockInfoArray; + +typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record +{ + USHORT usVoltage; + UCHAR ucVCEClockInfoIndex; +}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table +{ + UCHAR numEntries; + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; +}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table; + +typedef struct _ATOM_PPLIB_VCE_State_Record +{ + UCHAR ucVCEClockInfoIndex; + UCHAR ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary +}ATOM_PPLIB_VCE_State_Record; + +typedef struct _ATOM_PPLIB_VCE_State_Table +{ + UCHAR numEntries; + ATOM_PPLIB_VCE_State_Record entries[1]; +}ATOM_PPLIB_VCE_State_Table; + + +typedef struct _ATOM_PPLIB_VCE_Table +{ + UCHAR revid; +// VCEClockInfoArray array; +// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits; +// ATOM_PPLIB_VCE_State_Table states; +}ATOM_PPLIB_VCE_Table; + + +typedef struct _UVDClockInfo{ + USHORT usVClkLow; + UCHAR ucVClkHigh; + USHORT usDClkLow; + UCHAR ucDClkHigh; +}UVDClockInfo; + +typedef struct _UVDClockInfoArray{ + UCHAR ucNumEntries; + UVDClockInfo entries[1]; +}UVDClockInfoArray; + +typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record +{ + USHORT usVoltage; + UCHAR ucUVDClockInfoIndex; +}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table +{ + UCHAR numEntries; + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; +}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table; + +typedef struct _ATOM_PPLIB_UVD_Table +{ + UCHAR revid; +// UVDClockInfoArray array; +// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits; +}ATOM_PPLIB_UVD_Table; + +typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record +{ + USHORT usVoltage; + USHORT usSAMClockLow; + UCHAR ucSAMClockHigh; +}ATOM_PPLIB_SAMClk_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ + UCHAR numEntries; + ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; +}ATOM_PPLIB_SAMClk_Voltage_Limit_Table; + +typedef struct _ATOM_PPLIB_SAMU_Table +{ + UCHAR revid; + ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits; +}ATOM_PPLIB_SAMU_Table; + +typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record +{ + USHORT usVoltage; + USHORT usACPClockLow; + UCHAR ucACPClockHigh; +}ATOM_PPLIB_ACPClk_Voltage_Limit_Record; + +typedef struct 
_ATOM_PPLIB_ACPClk_Voltage_Limit_Table{ + UCHAR numEntries; + ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; +}ATOM_PPLIB_ACPClk_Voltage_Limit_Table; + +typedef struct _ATOM_PPLIB_ACP_Table +{ + UCHAR revid; + ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits; +}ATOM_PPLIB_ACP_Table; + +typedef struct _ATOM_PowerTune_Table{ + USHORT usTDP; + USHORT usConfigurableTDP; + USHORT usTDC; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usLowCACLeakage; + USHORT usHighCACLeakage; +}ATOM_PowerTune_Table; + +typedef struct _ATOM_PPLIB_POWERTUNE_Table +{ + UCHAR revid; + ATOM_PowerTune_Table power_tune_table; +}ATOM_PPLIB_POWERTUNE_Table; + +typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1 +{ + UCHAR revid; + ATOM_PowerTune_Table power_tune_table; + USHORT usMaximumPowerDeliveryLimit; + USHORT usReserve[7]; +} ATOM_PPLIB_POWERTUNE_Table_V1; + +#define ATOM_PPM_A_A 1 +#define ATOM_PPM_A_I 2 +typedef struct _ATOM_PPLIB_PPM_Table +{ + UCHAR ucRevId; + UCHAR ucPpmDesign; //A+I or A+A + USHORT usCpuCoreNumber; + ULONG ulPlatformTDP; + ULONG ulSmallACPlatformTDP; + ULONG ulPlatformTDC; + ULONG ulSmallACPlatformTDC; + ULONG ulApuTDP; + ULONG ulDGpuTDP; + ULONG ulDGpuUlvPower; + ULONG ulTjmax; +} ATOM_PPLIB_PPM_Table; + +#pragma pack(pop) + +#endif -- cgit v1.2.3 From 84a9d9eeabdca05321a7c890eef485770dade012 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 19 Apr 2013 19:11:37 -0400 Subject: drm/radeon: add structs to store uvd clock voltage deps Used for uvd power management. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index abf52687b309..262c4c47c319 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1276,6 +1276,17 @@ struct radeon_phase_shedding_limits_table { struct radeon_phase_shedding_limits_entry *entries; }; +struct radeon_uvd_clock_voltage_dependency_entry { + u32 vclk; + u32 dclk; + u16 v; +}; + +struct radeon_uvd_clock_voltage_dependency_table { + u8 count; + struct radeon_uvd_clock_voltage_dependency_entry *entries; +}; + struct radeon_ppm_table { u8 ppm_design; u16 cpu_core_number; @@ -1294,6 +1305,7 @@ struct radeon_dpm_dynamic_state { struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk; struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk; struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk; + struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; struct radeon_clock_array valid_sclk_values; struct radeon_clock_array valid_mclk_values; struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc; -- cgit v1.2.3 From a412fce0548105f14e48d25094d98fc87f7c0df4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Apr 2013 20:23:31 -0400 Subject: drm/radeon/cik: add rlc helpers for DPM Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 29 +++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/cikd.h | 9 +++++++++ 2 files changed, 38 insertions(+) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a36e98c9a875..727c296662f1 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5587,6 +5587,35 @@ static u32 cik_halt_rlc(struct radeon_device *rdev) return orig; } +void cik_enter_rlc_safe_mode(struct radeon_device *rdev) +{ + u32 tmp, i, mask; + + tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE); + WREG32(RLC_GPR_REG2, tmp); + + mask = 
GFX_POWER_STATUS | GFX_CLOCK_STATUS; + for (i = 0; i < rdev->usec_timeout; i++) { + if ((RREG32(RLC_GPM_STAT) & mask) == mask) + break; + udelay(1); + } + + for (i = 0; i < rdev->usec_timeout; i++) { + if ((RREG32(RLC_GPR_REG2) & REQ) == 0) + break; + udelay(1); + } +} + +void cik_exit_rlc_safe_mode(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE); + WREG32(RLC_GPR_REG2, tmp); +} + /** * cik_rlc_stop - stop the RLC ME * diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 63955abb1e11..116b3131a683 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -952,6 +952,8 @@ #define RLC_GPM_STAT 0xC400 # define RLC_GPM_BUSY (1 << 0) +# define GFX_POWER_STATUS (1 << 1) +# define GFX_CLOCK_STATUS (1 << 2) #define RLC_PG_CNTL 0xC40C # define GFX_PG_ENABLE (1 << 0) @@ -1004,6 +1006,13 @@ #define RLC_GPM_SCRATCH_ADDR 0xC4B0 #define RLC_GPM_SCRATCH_DATA 0xC4B4 +#define RLC_GPR_REG2 0xC4E8 +#define REQ 0x00000001 +#define MESSAGE(x) ((x) << 1) +#define MESSAGE_MASK 0x0000001e +#define MSG_ENTER_RLC_SAFE_MODE 1 +#define MSG_EXIT_RLC_SAFE_MODE 0 + #define CP_HPD_EOP_BASE_ADDR 0xC904 #define CP_HPD_EOP_BASE_ADDR_HI 0xC908 #define CP_HPD_EOP_VMID 0xC90C -- cgit v1.2.3 From 16fbe00d247d3f1ba7b3a614a3f9fe60e68ce30d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Apr 2013 21:41:26 -0400 Subject: drm/radeon: add support for thermal controller on KB/KV No support for reading temperature back yet. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_atombios.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 262c4c47c319..d1f5f7bb052c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1132,6 +1132,7 @@ enum radeon_int_thermal_type { THERMAL_TYPE_SI, THERMAL_TYPE_EMC2103_WITH_INTERNAL, THERMAL_TYPE_CI, + THERMAL_TYPE_KV, }; struct radeon_voltage { diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 27de73c162c9..6247b5e2d074 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2237,6 +2237,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_CI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || (controller->ucType == -- cgit v1.2.3 From 2aacd48fa76076d9eb078ccb084da26ddc835b5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 26 Mar 2013 19:25:29 -0400 Subject: drm/radeon: add CI to r600_is_internal_thermal_sensor() Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e5c860f4ccbe..e246e3a90c53 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -745,6 +745,7 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) case THERMAL_TYPE_SUMO: case THERMAL_TYPE_NI: case THERMAL_TYPE_SI: + case THERMAL_TYPE_CI: return true; case THERMAL_TYPE_ADT7473_WITH_INTERNAL: case THERMAL_TYPE_EMC2103_WITH_INTERNAL: -- cgit v1.2.3 From 12262906b179f56c377871543e18327f4df7de21 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 16 Jul 2013 16:59:08 -0400 Subject: drm/radeon: add KB/KV to r600_is_internal_thermal_sensor Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e246e3a90c53..d54a83864ad4 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -746,6 +746,7 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) case THERMAL_TYPE_NI: case THERMAL_TYPE_SI: case THERMAL_TYPE_CI: + case THERMAL_TYPE_KV: return true; case THERMAL_TYPE_ADT7473_WITH_INTERNAL: case THERMAL_TYPE_EMC2103_WITH_INTERNAL: -- cgit v1.2.3 From 286d9cc67a87863ba510b22d3f32cbeed9864b85 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Jun 2013 15:50:47 -0400 Subject: drm/radeon: add get_temperature() callbacks for CIK (v2) This added support for the on-chip thermal sensors on CIK asics. v2: fix register offset. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 37 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/cikd.h | 8 ++++++++ drivers/gpu/drm/radeon/radeon_asic.c | 2 ++ drivers/gpu/drm/radeon/radeon_asic.h | 2 ++ drivers/gpu/drm/radeon/radeon_pm.c | 2 ++ 5 files changed, 51 insertions(+) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 727c296662f1..d0804f79efed 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -69,6 +69,43 @@ static void cik_program_aspm(struct radeon_device *rdev); static void cik_init_pg(struct radeon_device *rdev); static void cik_init_cg(struct radeon_device *rdev); +/* get temperature in millidegrees */ +int ci_get_temp(struct radeon_device *rdev) +{ + u32 temp; + int actual_temp = 0; + + temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> + CTF_TEMP_SHIFT; + + if (temp & 0x200) + actual_temp = 255; + else + actual_temp = temp & 0x1ff; + + actual_temp = actual_temp * 1000; + + return actual_temp; +} + +/* get temperature in millidegrees */ +int kv_get_temp(struct radeon_device *rdev) +{ + u32 temp; + int actual_temp = 0; + + temp = RREG32_SMC(0xC0300E0C); + + if (temp) + actual_temp = (temp / 8) - 49; + else + actual_temp = 0; + + actual_temp = actual_temp * 1000; + + return actual_temp; +} + /* * Indirect registers accessor */ diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 116b3131a683..65886caaf756 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -32,6 +32,14 @@ #define GENERAL_PWRMGT 0xC0200000 # define GPU_COUNTER_CLK (1 << 15) +#define CG_MULT_THERMAL_STATUS 0xC0300014 +#define ASIC_MAX_TEMP(x) ((x) << 0) +#define ASIC_MAX_TEMP_MASK 0x000001ff +#define ASIC_MAX_TEMP_SHIFT 0 +#define CTF_TEMP(x) ((x) << 9) +#define CTF_TEMP_MASK 0x0003fe00 +#define CTF_TEMP_SHIFT 9 + #define MPLL_BYPASSCLK_SEL 0xC050019C # define MPLL_CLKOUT_SEL(x) ((x) << 8) # define MPLL_CLKOUT_SEL_MASK 0xFF00 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 880551b6df61..3a55540fe280 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2452,6 +2452,7 @@ static struct radeon_asic ci_asic = { .set_pcie_lanes = NULL, .set_clock_gating = NULL, .set_uvd_clocks = &cik_set_uvd_clocks, + .get_temperature = &ci_get_temp, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, @@ -2607,6 +2608,7 @@ static struct radeon_asic kv_asic = { .set_pcie_lanes = NULL, .set_clock_gating = NULL, .set_uvd_clocks = &cik_set_uvd_clocks, + .get_temperature = &kv_get_temp, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3cf7d89c1bd8..d5c6c5b10edf 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -747,5 +747,7 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring); void cik_compute_ring_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring); +int ci_get_temp(struct radeon_device *rdev); +int kv_get_temp(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 79a03de4ac0a..1408014dce8f 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev) case THERMAL_TYPE_NI: case THERMAL_TYPE_SUMO: case THERMAL_TYPE_SI: + 
case THERMAL_TYPE_CI: + case THERMAL_TYPE_KV: if (rdev->asic->pm.get_temperature == NULL) return err; rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); -- cgit v1.2.3 From 9dd9333b2fac7b0ff00574693f3192926e3466fe Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 29 Apr 2013 18:53:52 -0400 Subject: drm/radeon: adjust si_dpm function for code sharing Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/si_dpm.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 88699e3cd868..0f8be48c2ef4 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3759,19 +3759,20 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev, } static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, + u32 max_voltage_steps, struct atom_voltage_table *voltage_table) { unsigned int i, diff; - if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS) + if (voltage_table->count <= max_voltage_steps) return; - diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS; + diff = voltage_table->count - max_voltage_steps; - for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++) + for (i= 0; i < max_voltage_steps; i++) voltage_table->entries[i] = voltage_table->entries[i + diff]; - voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS; + voltage_table->count = max_voltage_steps; } static int si_construct_voltage_tables(struct radeon_device *rdev) @@ -3787,7 +3788,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev) return ret; if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table); + si_trim_voltage_table_to_fit_state_table(rdev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddc_voltage_table); if (eg_pi->vddci_control) { ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, @@ -3796,7 +3799,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev) return ret; if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table); + si_trim_voltage_table_to_fit_state_table(rdev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddci_voltage_table); } if (pi->mvdd_control) { @@ -3814,7 +3819,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev) } if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table); + si_trim_voltage_table_to_fit_state_table(rdev, + SISLANDS_MAX_NO_VREG_STEPS, + &si_pi->mvdd_voltage_table); } if (si_pi->vddc_phase_shed_control) { -- cgit v1.2.3 From ef976ec4e2ae6d91a9aab5714071d1eed0115ed6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 6 May 2013 11:31:04 -0400 Subject: drm/radeon/dpm: update cac leakage table parsing for CI Uses a different table format if the board supports EVV. 
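To make the two layouts concrete, a sketch of how one leakage-table record is viewed depending on whether ATOM_PP_PLATFORM_CAP_EVV is set in the platform caps (illustrative only; the real definition is the ATOM_PPLIB_CAC_Leakage_Record union added to pptable.h earlier, and the parser below selects between the two views):

/* Without EVV: a (voltage, leakage) pair per entry. */
struct cac_entry_classic {
	unsigned short vddc;
	unsigned int   leakage;
};

/* With EVV: the same record carries three voltage values instead. */
struct cac_entry_evv {
	unsigned short vddc1;
	unsigned short vddc2;
	unsigned short vddc3;
};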
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 17 +++++++++++++---- drivers/gpu/drm/radeon/radeon.h | 15 +++++++++++---- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index d54a83864ad4..ccdf770dd770 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -956,10 +956,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) return -ENOMEM; } for (i = 0; i < cac_table->ucNumEntries; i++) { - rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = - le16_to_cpu(cac_table->entries[i].usVddc); - rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = - le32_to_cpu(cac_table->entries[i].ulLeakageValue); + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = + le16_to_cpu(cac_table->entries[i].usVddc1); + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = + le16_to_cpu(cac_table->entries[i].usVddc2); + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = + le16_to_cpu(cac_table->entries[i].usVddc3); + } else { + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = + le16_to_cpu(cac_table->entries[i].usVddc); + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = + le32_to_cpu(cac_table->entries[i].ulLeakageValue); + } } rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d1f5f7bb052c..3376107f3b1e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1256,14 +1256,21 @@ struct radeon_clock_voltage_dependency_table { struct radeon_clock_voltage_dependency_entry *entries; }; -struct radeon_cac_leakage_entry { - u16 vddc; - u32 leakage; +union radeon_cac_leakage_entry { + struct { + u16 vddc; + u32 leakage; + }; + struct { + u16 vddc1; + u16 vddc2; + u16 vddc3; + }; }; struct radeon_cac_leakage_table { u32 count; - struct radeon_cac_leakage_entry *entries; + union radeon_cac_leakage_entry *entries; }; struct radeon_phase_shedding_limits_entry { -- cgit v1.2.3 From 58cb7632df30698900a474cb85fa292bafd73b2e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 6 May 2013 12:15:33 -0400 Subject: drm/radeon/dpm: add support for parsing the atom powertune table Needed for DPM on CI. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 44 +++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon.h | 12 +++++++++++ 2 files changed, 56 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index ccdf770dd770..e6905f011bfa 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1014,6 +1014,48 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.ppm_table->tj_max = le32_to_cpu(ppm->ulTjmax); } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && + ext_hdr->usPowerTuneTableOffset) { + u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + ATOM_PowerTune_Table *pt; + rdev->pm.dpm.dyn_state.cac_tdp_table = + kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL); + if (!rdev->pm.dpm.dyn_state.cac_tdp_table) { + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); + kfree(rdev->pm.dpm.dyn_state.ppm_table); + return -ENOMEM; + } + if (rev > 0) { + ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = + ppt->usMaximumPowerDeliveryLimit; + pt = &ppt->power_tune_table; + } else { + ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; + pt = &ppt->power_tune_table; + } + rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); + rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = + le16_to_cpu(pt->usConfigurableTDP); + rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); + rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = + le16_to_cpu(pt->usBatteryPowerLimit); + rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = + le16_to_cpu(pt->usSmallPowerLimit); + rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = + le16_to_cpu(pt->usLowCACLeakage); + rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = + le16_to_cpu(pt->usHighCACLeakage); + } } return 0; @@ -1033,6 +1075,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); if (rdev->pm.dpm.dyn_state.ppm_table) kfree(rdev->pm.dpm.dyn_state.ppm_table); + if (rdev->pm.dpm.dyn_state.cac_tdp_table) + kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3376107f3b1e..5a624b8bbbb9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1308,6 +1308,17 @@ struct radeon_ppm_table { u32 tj_max; }; +struct radeon_cac_tdp_table { + u16 tdp; + u16 configurable_tdp; + u16 tdc; + u16 battery_power_limit; + u16 small_power_limit; + u16 low_cac_leakage; + u16 high_cac_leakage; + u16 maximum_power_delivery_limit; +}; + struct radeon_dpm_dynamic_state { struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk; struct 
radeon_clock_voltage_dependency_table vddci_dependency_on_mclk; @@ -1325,6 +1336,7 @@ struct radeon_dpm_dynamic_state { struct radeon_cac_leakage_table cac_leakage_table; struct radeon_phase_shedding_limits_table phase_shedding_limits_table; struct radeon_ppm_table *ppm_table; + struct radeon_cac_tdp_table *cac_tdp_table; }; struct radeon_dpm_fan { -- cgit v1.2.3 From dd621a22cf43bbe0bfde8ab6a1d59b7138eab998 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 6 May 2013 14:37:56 -0400 Subject: drm/radeon/dpm: grab mvdd_dependency_on_mclk info from vbios Required for dpm on CI. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 19 +++++++++++++++++++ drivers/gpu/drm/radeon/radeon.h | 1 + 2 files changed, 20 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e6905f011bfa..9dda73507384 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -877,6 +877,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) return ret; } } + if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); + ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + dep_table); + if (ret) { + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + return ret; + } + } if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = (ATOM_PPLIB_Clock_Voltage_Limit_Table *) @@ -909,6 +922,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); return -ENOMEM; } @@ -953,6 +967,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); return -ENOMEM; } for (i = 0; i < cac_table->ucNumEntries; i++) { @@ -991,6 +1006,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); return -ENOMEM; } @@ -1025,6 +1041,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); kfree(rdev->pm.dpm.dyn_state.ppm_table); return -ENOMEM; @@ -1069,6 +1086,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); if 
(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5a624b8bbbb9..ca1e1472a203 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1323,6 +1323,7 @@ struct radeon_dpm_dynamic_state { struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk; struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk; struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk; + struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk; struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk; struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; struct radeon_clock_array valid_sclk_values; -- cgit v1.2.3 From d29f013b205d7927c625dec5850a599a9f76cb08 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 16:37:28 -0400 Subject: drm/radeon: add structs to store vce clock voltage deps Used for vce power management. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ca1e1472a203..636c040c9677 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1295,6 +1295,17 @@ struct radeon_uvd_clock_voltage_dependency_table { struct radeon_uvd_clock_voltage_dependency_entry *entries; }; +struct radeon_vce_clock_voltage_dependency_entry { + u32 ecclk; + u32 evclk; + u16 v; +}; + +struct radeon_vce_clock_voltage_dependency_table { + u8 count; + struct radeon_vce_clock_voltage_dependency_entry *entries; +}; + struct radeon_ppm_table { u8 ppm_design; u16 cpu_core_number; @@ -1326,6 +1337,7 @@ struct radeon_dpm_dynamic_state { struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk; struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk; struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; + struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; struct radeon_clock_array valid_sclk_values; struct radeon_clock_array valid_mclk_values; struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc; -- cgit v1.2.3 From 94a914f51e45b8bd7e943327337f307dc72354b9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 16:42:33 -0400 Subject: drm/radeon: add clock voltage dep tables for acp, samu Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 636c040c9677..11e69770c015 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1338,6 +1338,8 @@ struct radeon_dpm_dynamic_state { struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk; struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table; + struct 
radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table; struct radeon_clock_array valid_sclk_values; struct radeon_clock_array valid_mclk_values; struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc; -- cgit v1.2.3 From 57ff476171f4065ab0312be0752f3439ee943ebe Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 17:04:27 -0400 Subject: drm/radeon: parse the vce clock voltage deps table Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 41 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 9dda73507384..b49b0f0795f0 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -989,12 +989,47 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) } } - /* ppm table */ + /* ext tables */ if (le16_to_cpu(power_info->pplib.usTableSize) >= sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && + ext_hdr->usVCETableOffset) { + VCEClockInfoArray *array = (VCEClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + array->ucNumEntries * sizeof(VCEClockInfo)); + u32 size = limits->numEntries * + sizeof(struct radeon_vce_clock_voltage_dependency_entry); + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); + return -ENOMEM; + } + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = + limits->numEntries; + for (i = 0; i < limits->numEntries; i++) { + VCEClockInfo *vce_clk = + &array->entries[limits->entries[i].ucVCEClockInfoIndex]; + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(limits->entries[i].usVoltage); + } + } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && ext_hdr->usPPMTableOffset) { ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) @@ -1008,6 +1043,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); + kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); return -ENOMEM; } rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; @@ -1044,6 +1080,7 @@ 
int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); kfree(rdev->pm.dpm.dyn_state.ppm_table); + kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); return -ENOMEM; } if (rev > 0) { @@ -1096,6 +1133,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.ppm_table); if (rdev->pm.dpm.dyn_state.cac_tdp_table) kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); + if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) + kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, -- cgit v1.2.3 From 018042b15b556807afd0393b285f001fce515151 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 17:14:11 -0400 Subject: drm/radeon: parse the uvd clock voltage deps table Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 40 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b49b0f0795f0..c103d3fd9428 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1030,6 +1030,42 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) le16_to_cpu(limits->entries[i].usVoltage); } } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && + ext_hdr->usUVDTableOffset) { + UVDClockInfoArray *array = (UVDClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); + u32 size = limits->numEntries * + sizeof(struct radeon_uvd_clock_voltage_dependency_entry); + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); + kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); + kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); + return -ENOMEM; + } + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = + limits->numEntries; + for (i = 0; i < limits->numEntries; i++) { + UVDClockInfo *uvd_clk = + &array->entries[limits->entries[i].ucUVDClockInfoIndex]; + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = + le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = + le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(limits->entries[i].usVoltage); + } + } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && ext_hdr->usPPMTableOffset) { ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) @@ -1044,6 +1080,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) 
kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); + kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); return -ENOMEM; } rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; @@ -1081,6 +1118,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); kfree(rdev->pm.dpm.dyn_state.ppm_table); kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); + kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); return -ENOMEM; } if (rev > 0) { @@ -1135,6 +1173,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); + if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) + kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, -- cgit v1.2.3 From becfa6989b146a4696c20d19f0377a003be70ac5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 17:21:56 -0400 Subject: drm/radeon/dpm: clean up the extended table error pathes Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 40 ++++++--------------------------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index c103d3fd9428..98db6ea51ca7 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -919,10 +919,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) sizeof(struct radeon_phase_shedding_limits_entry), GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } @@ -964,10 +961,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } for (i = 0; i < cac_table->ucNumEntries; i++) { @@ -1010,11 +1004,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); - 
kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = @@ -1045,12 +1035,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); - kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = @@ -1074,13 +1059,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.ppm_table = kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.ppm_table) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); - kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); - kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; @@ -1111,14 +1090,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.cac_tdp_table = kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.cac_tdp_table) { - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); - kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); - kfree(rdev->pm.dpm.dyn_state.ppm_table); - kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); - kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); + r600_free_extended_power_table(rdev); return -ENOMEM; } if (rev > 0) { -- cgit v1.2.3 From 3cb928ff1e5bbb0bf03bb6aaa1b6052286565283 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 17:27:49 -0400 Subject: drm/radeon: parse the samu clock voltage deps table Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 98db6ea51ca7..314886ab011b 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1051,6 +1051,30 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) le16_to_cpu(limits->entries[i].usVoltage); } } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && + ext_hdr->usSAMUTableOffset) { + ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usSAMUTableOffset) 
+ 1); + u32 size = limits->numEntries * + sizeof(struct radeon_clock_voltage_dependency_entry); + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { + r600_free_extended_power_table(rdev); + return -ENOMEM; + } + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = + limits->numEntries; + for (i = 0; i < limits->numEntries; i++) { + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(limits->entries[i].usSAMClockLow) | + (limits->entries[i].ucSAMClockHigh << 16); + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(limits->entries[i].usVoltage); + } + } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && ext_hdr->usPPMTableOffset) { ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) @@ -1147,6 +1171,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); + if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) + kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, -- cgit v1.2.3 From 96d2af2150c1e3326ea1163db1210ba4139f190a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 May 2013 17:34:45 -0400 Subject: drm/radeon: parse the acp clock voltage deps table Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 314886ab011b..bf851ac5491a 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1106,6 +1106,30 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.ppm_table->tj_max = le32_to_cpu(ppm->ulTjmax); } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && + ext_hdr->usACPTableOffset) { + ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usACPTableOffset) + 1); + u32 size = limits->numEntries * + sizeof(struct radeon_clock_voltage_dependency_entry); + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { + r600_free_extended_power_table(rdev); + return -ENOMEM; + } + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = + limits->numEntries; + for (i = 0; i < limits->numEntries; i++) { + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(limits->entries[i].usACPClockLow) | + (limits->entries[i].ucACPClockHigh << 16); + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(limits->entries[i].usVoltage); + } + } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && ext_hdr->usPowerTuneTableOffset) { u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + @@ -1173,6 +1197,8 @@ void r600_free_extended_power_table(struct radeon_device *rdev) 
kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries); + if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) + kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, -- cgit v1.2.3 From 4df5ac2652d1ab9c15b7b11dbbd285157bcb5ee5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 May 2013 17:44:31 -0400 Subject: drm/radeon: add r600_get_pcie_lane_support helper Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/radeon/r600_dpm.h | 4 ++++ 2 files changed, 27 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index bf851ac5491a..34ea5d6ee4be 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1223,3 +1223,26 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, } return RADEON_PCIE_GEN1; } + +u16 r600_get_pcie_lane_support(struct radeon_device *rdev, + u16 asic_lanes, + u16 default_lanes) +{ + switch (asic_lanes) { + case 0: + default: + return default_lanes; + case 1: + return 1; + case 2: + return 2; + case 4: + return 4; + case 8: + return 8; + case 12: + return 12; + case 16: + return 16; + } +} diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 7c822d9ae53d..71d5d93c371b 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h @@ -224,4 +224,8 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, enum radeon_pcie_gen asic_gen, enum radeon_pcie_gen default_gen); +u16 r600_get_pcie_lane_support(struct radeon_device *rdev, + u16 asic_lanes, + u16 default_lanes); + #endif -- cgit v1.2.3 From c4453e66130119d9f2947ee051d598aae6823d49 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 15 May 2013 15:53:57 -0400 Subject: drm/radeon/dpm: add vce clocks to radeon_ps Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 11e69770c015..b6bac497f001 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1206,6 +1206,9 @@ struct radeon_ps { /* UVD clocks */ u32 vclk; u32 dclk; + /* VCE clocks */ + u32 evclk; + u32 ecclk; /* asic priv */ void *ps_priv; }; -- cgit v1.2.3 From 61fb192a1cf87413e19f565495595c8c116d7c10 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 15 May 2013 17:25:03 -0400 Subject: drm/radeon/dpm: add a helper to encode pcie lane setting convert from number of lanes to register setting. 
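The mapping is small enough to show in full; a standalone sketch that mirrors the lookup table added below and can be compiled outside the kernel:

#include <stdio.h>

/* Lane counts 1/2/4/8/12/16 encode to 1/2/3/4/5/6; anything else,
 * including out-of-range widths, encodes to 0, matching
 * r600_encode_pci_lane_width() below. */
static unsigned char encode_pci_lane_width(unsigned int lanes)
{
	static const unsigned char encoded_lanes[] =
		{ 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;
	return encoded_lanes[lanes];
}

int main(void)
{
	printf("x16 -> %u\n", encode_pci_lane_width(16)); /* 6 */
	printf("x8  -> %u\n", encode_pci_lane_width(8));  /* 4 */
	printf("x3  -> %u\n", encode_pci_lane_width(3));  /* 0 */
	return 0;
}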
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 10 ++++++++++ drivers/gpu/drm/radeon/r600_dpm.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 34ea5d6ee4be..89c46c55f93d 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1246,3 +1246,13 @@ u16 r600_get_pcie_lane_support(struct radeon_device *rdev, return 16; } } + +u8 r600_encode_pci_lane_width(u32 lanes) +{ + u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; + + if (lanes > 16) + return 0; + + return encoded_lanes[lanes]; +} diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 71d5d93c371b..8dc1fbd9dbf1 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h @@ -227,5 +227,6 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, u16 r600_get_pcie_lane_support(struct radeon_device *rdev, u16 asic_lanes, u16 default_lanes); +u8 r600_encode_pci_lane_width(u32 lanes); #endif -- cgit v1.2.3 From 6bb5c0d74c1962a8b1c722521c01e19d38c47370 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 19 Jul 2013 12:42:08 -0400 Subject: drm/radeon/dpm: add helper to fetch the vrefresh of the current mode Needed for DPM on CI. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 18 ++++++++++++++++++ drivers/gpu/drm/radeon/r600_dpm.h | 1 + 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 89c46c55f93d..26a787836032 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) return vblank_time_us; } +u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 vrefresh = 0; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { + vrefresh = radeon_crtc->hw_mode.vrefresh; + break; + } + } + + return vrefresh; +} + void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u) { diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 8dc1fbd9dbf1..1000bf9719f2 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h @@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps); void r600_dpm_print_ps_status(struct radeon_device *rdev, struct radeon_ps *rps); u32 r600_dpm_get_vblank_time(struct radeon_device *rdev); +u32 r600_dpm_get_vrefresh(struct radeon_device *rdev); bool r600_is_uvd_state(u32 class, u32 class2); void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u); -- cgit v1.2.3 From 41a524abff2630dce0f9c38eb7340fbf2dc5bf27 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 01:01:40 -0400 Subject: drm/radeon/kms: add dpm support for KB/KV This adds dpm support for KB/KV asics. This includes: - dynamic engine clock scaling - dynamic voltage scaling - power containment - shader power scaling Set radeon.dpm=1 to enable. 
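Since dpm defaults to off here, it has to be opted into explicitly using the standard kernel module parameter syntax (nothing specific to this patch):

	radeon.dpm=1			on the kernel command line, or
	options radeon dpm=1		in a modprobe.d configuration file when radeon is built as a module

With the parameter set, the active power state can be inspected and forced through the power_dpm_state and power_dpm_force_performance_level sysfs files that radeon_pm exposes under the card's device node.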
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/cik.c | 30 +- drivers/gpu/drm/radeon/cikd.h | 72 + drivers/gpu/drm/radeon/kv_dpm.c | 2536 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/kv_dpm.h | 199 +++ drivers/gpu/drm/radeon/kv_smc.c | 207 +++ drivers/gpu/drm/radeon/ppsmc.h | 34 + drivers/gpu/drm/radeon/radeon_asic.c | 14 + drivers/gpu/drm/radeon/radeon_asic.h | 14 + drivers/gpu/drm/radeon/radeon_pm.c | 2 + drivers/gpu/drm/radeon/smu7.h | 170 +++ drivers/gpu/drm/radeon/smu7_fusion.h | 300 ++++ 12 files changed, 3576 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/radeon/kv_dpm.c create mode 100644 drivers/gpu/drm/radeon/kv_dpm.h create mode 100644 drivers/gpu/drm/radeon/kv_smc.c create mode 100644 drivers/gpu/drm/radeon/smu7.h create mode 100644 drivers/gpu/drm/radeon/smu7_fusion.h diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index bfabd69b4e39..d3265b5d4661 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -79,7 +79,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ - trinity_smc.o ni_dpm.o si_smc.o si_dpm.o + trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index d0804f79efed..87e5aeed6e88 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6593,6 +6593,7 @@ int cik_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 dma_cntl, dma_cntl1; + u32 thermal_int; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -6625,6 +6626,9 @@ int cik_irq_set(struct radeon_device *rdev) cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & + ~(THERM_INTH_MASK | THERM_INTL_MASK); + /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("cik_irq_set: sw int gfx\n"); @@ -6782,6 +6786,11 @@ int cik_irq_set(struct radeon_device *rdev) hpd6 |= DC_HPDx_INT_EN; } + if (rdev->irq.dpm_thermal) { + DRM_DEBUG("dpm thermal\n"); + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + } + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); @@ -6816,6 +6825,8 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + return 0; } @@ -7027,6 +7038,7 @@ int cik_irq_process(struct radeon_device *rdev) bool queue_hotplug = false; bool queue_reset = false; u32 addr, status, mc_client; + bool queue_thermal = false; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -7377,6 +7389,19 @@ restart_ih: break; } break; + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + rdev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + rdev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + 
break; + case 233: /* GUI IDLE */ + DRM_DEBUG("IH: GUI idle\n"); + break; case 241: /* SDMA Privileged inst */ case 247: /* SDMA Privileged inst */ DRM_ERROR("Illegal instruction in SDMA command stream\n"); @@ -7416,9 +7441,6 @@ restart_ih: break; } break; - case 233: /* GUI IDLE */ - DRM_DEBUG("IH: GUI idle\n"); - break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; @@ -7432,6 +7454,8 @@ restart_ih: schedule_work(&rdev->hotplug_work); if (queue_reset) schedule_work(&rdev->reset_work); + if (queue_thermal) + schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 65886caaf756..179ca3625ae4 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -28,10 +28,59 @@ #define CIK_RB_BITMAP_WIDTH_PER_SH 2 +/* DIDT IND registers */ +#define DIDT_SQ_CTRL0 0x0 +# define DIDT_CTRL_EN (1 << 0) +#define DIDT_DB_CTRL0 0x20 +#define DIDT_TD_CTRL0 0x40 +#define DIDT_TCP_CTRL0 0x60 + /* SMC IND registers */ +#define NB_DPM_CONFIG_1 0x3F9E8 +# define Dpm0PgNbPsLo(x) ((x) << 0) +# define Dpm0PgNbPsLo_MASK 0x000000ff +# define Dpm0PgNbPsLo_SHIFT 0 +# define Dpm0PgNbPsHi(x) ((x) << 8) +# define Dpm0PgNbPsHi_MASK 0x0000ff00 +# define Dpm0PgNbPsHi_SHIFT 8 +# define DpmXNbPsLo(x) ((x) << 16) +# define DpmXNbPsLo_MASK 0x00ff0000 +# define DpmXNbPsLo_SHIFT 16 +# define DpmXNbPsHi(x) ((x) << 24) +# define DpmXNbPsHi_MASK 0xff000000 +# define DpmXNbPsHi_SHIFT 24 + +#define SMC_SYSCON_MSG_ARG_0 0x80000068 + #define GENERAL_PWRMGT 0xC0200000 +# define GLOBAL_PWRMGT_EN (1 << 0) # define GPU_COUNTER_CLK (1 << 15) +#define SCLK_PWRMGT_CNTL 0xC0200008 +# define RESET_BUSY_CNT (1 << 4) +# define RESET_SCLK_CNT (1 << 5) +# define DYNAMIC_PM_EN (1 << 21) + +#define CG_FTV_0 0xC02001A8 + +#define LCAC_SX0_OVR_SEL 0xC0400D04 +#define LCAC_SX0_OVR_VAL 0xC0400D08 + +#define LCAC_MC0_OVR_SEL 0xC0400D34 +#define LCAC_MC0_OVR_VAL 0xC0400D38 + +#define LCAC_MC1_OVR_SEL 0xC0400D40 +#define LCAC_MC1_OVR_VAL 0xC0400D44 + +#define LCAC_MC2_OVR_SEL 0xC0400D4C +#define LCAC_MC2_OVR_VAL 0xC0400D50 + +#define LCAC_MC3_OVR_SEL 0xC0400D58 +#define LCAC_MC3_OVR_VAL 0xC0400D5C + +#define LCAC_CPL_OVR_SEL 0xC0400D84 +#define LCAC_CPL_OVR_VAL 0xC0400D88 + #define CG_MULT_THERMAL_STATUS 0xC0300014 #define ASIC_MAX_TEMP(x) ((x) << 0) #define ASIC_MAX_TEMP_MASK 0x000001ff @@ -60,6 +109,16 @@ # define ZCLK_SEL(x) ((x) << 8) # define ZCLK_SEL_MASK 0xFF00 +#define CG_THERMAL_INT_CTRL 0xC2100028 +#define DIG_THERM_INTH(x) ((x) << 0) +#define DIG_THERM_INTH_MASK 0x000000FF +#define DIG_THERM_INTH_SHIFT 0 +#define DIG_THERM_INTL(x) ((x) << 8) +#define DIG_THERM_INTL_MASK 0x0000FF00 +#define DIG_THERM_INTL_SHIFT 8 +#define THERM_INTH_MASK (1 << 24) +#define THERM_INTL_MASK (1 << 25) + /* PCIE registers idx/data 0x38/0x3c */ #define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */ # define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7) @@ -173,6 +232,19 @@ #define PCIE_INDEX 0x38 #define PCIE_DATA 0x3C +#define SMC_IND_INDEX_0 0x200 +#define SMC_IND_DATA_0 0x204 + +#define SMC_IND_ACCESS_CNTL 0x240 +#define AUTO_INCREMENT_IND_0 (1 << 0) + +#define SMC_MESSAGE_0 0x250 +#define SMC_MSG_MASK 0xffff +#define SMC_RESP_0 0x254 +#define SMC_RESP_MASK 0xffff + +#define SMC_MSG_ARG_0 0x290 + #define VGA_HDP_CONTROL 0x328 #define VGA_MEMORY_DISABLE (1 << 4) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c new file mode 100644 index 
000000000000..2e4016356dab --- /dev/null +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -0,0 +1,2536 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "radeon.h" +#include "cikd.h" +#include "r600_dpm.h" +#include "kv_dpm.h" + +#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define KV_MINIMUM_ENGINE_CLOCK 800 +#define SMC_RAM_END 0x40000 + +static void kv_init_graphics_levels(struct radeon_device *rdev); +static int kv_calculate_ds_divider(struct radeon_device *rdev); +static int kv_calculate_nbps_level_settings(struct radeon_device *rdev); +static int kv_calculate_dpm_settings(struct radeon_device *rdev); +static void kv_enable_new_levels(struct radeon_device *rdev); +static void kv_program_nbps_index_settings(struct radeon_device *rdev, + struct radeon_ps *new_rps); +static int kv_set_enabled_levels(struct radeon_device *rdev); +static int kv_force_dpm_lowest(struct radeon_device *rdev); +static void kv_apply_state_adjust_rules(struct radeon_device *rdev, + struct radeon_ps *new_rps, + struct radeon_ps *old_rps); +static int kv_set_thermal_temperature_range(struct radeon_device *rdev, + int min_temp, int max_temp); +static int kv_init_fps_limits(struct radeon_device *rdev); + +static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); +static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate); +static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate); +static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate); + +extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); +extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); +extern void cik_update_cg(struct radeon_device *rdev, + u32 block, bool enable); + +static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 2 }, + { 4, 1, 1 }, + { 5, 5, 2 }, + { 6, 6, 1 }, + { 7, 9, 2 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] 
= +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 1 }, + { 4, 1, 1 }, + { 5, 5, 1 }, + { 6, 6, 1 }, + { 7, 9, 1 }, + { 8, 4, 1 }, + { 9, 2, 1 }, + { 10, 3, 1 }, + { 11, 6, 1 }, + { 12, 8, 2 }, + { 13, 1, 1 }, + { 14, 2, 1 }, + { 15, 3, 1 }, + { 16, 1, 1 }, + { 17, 4, 1 }, + { 18, 3, 1 }, + { 19, 1, 1 }, + { 20, 8, 1 }, + { 21, 5, 1 }, + { 22, 1, 1 }, + { 23, 1, 1 }, + { 24, 4, 1 }, + { 27, 6, 1 }, + { 28, 1, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_reg sx0_cac_config_reg[] = +{ + { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc0_cac_config_reg[] = +{ + { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc1_cac_config_reg[] = +{ + { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc2_cac_config_reg[] = +{ + { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc3_cac_config_reg[] = +{ + { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg cpl_cac_config_reg[] = +{ + { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_pt_config_reg didt_config_kv[] = +{ + { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x00ff0000, 
16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + +static struct kv_ps *kv_get_ps(struct radeon_ps *rps) +{ + struct kv_ps *ps = rps->ps_priv; + + return ps; +} + +static struct kv_power_info *kv_get_pi(struct radeon_device *rdev) +{ + struct kv_power_info *pi = rdev->pm.dpm.priv; + + return pi; +} + +#if 0 +static void kv_program_local_cac_table(struct radeon_device *rdev, + const struct kv_lcac_config_values *local_cac_table, + const struct kv_lcac_config_reg *local_cac_reg) +{ + u32 i, count, data; + const struct kv_lcac_config_values *values = local_cac_table; + + while (values->block_id != 0xffffffff) { + count = values->signal_id; + for (i = 0; i < count; i++) { + data = ((values->block_id << local_cac_reg->block_shift) & + local_cac_reg->block_mask); + data |= ((i << local_cac_reg->signal_shift) & + local_cac_reg->signal_mask); + data |= ((values->t << local_cac_reg->t_shift) & + local_cac_reg->t_mask); + data |= ((1 << local_cac_reg->enable_shift) & + local_cac_reg->enable_mask); + WREG32_SMC(local_cac_reg->cntl, data); + } + values++; + } +} +#endif + +static int kv_program_pt_config_registers(struct radeon_device *rdev, + const struct kv_pt_config_reg *cac_config_regs) +{ + const struct kv_pt_config_reg *config_regs = cac_config_regs; + u32 data; + u32 cache = 0; + + if (config_regs == NULL) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == KV_CONFIGREG_CACHE) { + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + } else { + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + data = RREG32_SMC(config_regs->offset); + break; + case KV_CONFIGREG_DIDT_IND: + data = 
RREG32_DIDT(config_regs->offset); + break; + default: + data = RREG32(config_regs->offset << 2); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + cache = 0; + + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + WREG32_SMC(config_regs->offset, data); + break; + case KV_CONFIGREG_DIDT_IND: + WREG32_DIDT(config_regs->offset, data); + break; + default: + WREG32(config_regs->offset << 2, data); + break; + } + } + config_regs++; + } + + return 0; +} + +static void kv_do_enable_didt(struct radeon_device *rdev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 data; + + if (pi->caps_sq_ramping) { + data = RREG32_DIDT(DIDT_SQ_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_SQ_CTRL0, data); + } + + if (pi->caps_db_ramping) { + data = RREG32_DIDT(DIDT_DB_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_DB_CTRL0, data); + } + + if (pi->caps_td_ramping) { + data = RREG32_DIDT(DIDT_TD_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_TD_CTRL0, data); + } + + if (pi->caps_tcp_ramping) { + data = RREG32_DIDT(DIDT_TCP_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_TCP_CTRL0, data); + } +} + +static int kv_enable_didt(struct radeon_device *rdev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + if (pi->caps_sq_ramping || + pi->caps_db_ramping || + pi->caps_td_ramping || + pi->caps_tcp_ramping) { + cik_enter_rlc_safe_mode(rdev); + + if (enable) { + ret = kv_program_pt_config_registers(rdev, didt_config_kv); + if (ret) { + cik_exit_rlc_safe_mode(rdev); + return ret; + } + } + + kv_do_enable_didt(rdev, enable); + + cik_exit_rlc_safe_mode(rdev); + } + + return 0; +} + +#if 0 +static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->caps_cac) { + WREG32_SMC(LCAC_SX0_OVR_SEL, 0); + WREG32_SMC(LCAC_SX0_OVR_VAL, 0); + kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg); + + WREG32_SMC(LCAC_MC0_OVR_SEL, 0); + WREG32_SMC(LCAC_MC0_OVR_VAL, 0); + kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); + + WREG32_SMC(LCAC_MC1_OVR_SEL, 0); + WREG32_SMC(LCAC_MC1_OVR_VAL, 0); + kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); + + WREG32_SMC(LCAC_MC2_OVR_SEL, 0); + WREG32_SMC(LCAC_MC2_OVR_VAL, 0); + kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); + + WREG32_SMC(LCAC_MC3_OVR_SEL, 0); + WREG32_SMC(LCAC_MC3_OVR_VAL, 0); + kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); + + WREG32_SMC(LCAC_CPL_OVR_SEL, 0); + WREG32_SMC(LCAC_CPL_OVR_VAL, 0); + kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); + } +} +#endif + +static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret = 0; + + if (pi->caps_cac) { + if (enable) { + ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac); + if (ret) + pi->cac_enabled = false; + else + pi->cac_enabled = true; + } else if (pi->cac_enabled) { + kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac); + pi->cac_enabled = false; + } + } + + return ret; +} + +static int kv_process_firmware_header(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + 
u32 tmp; + int ret; + + ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, pi->sram_end); + + if (ret == 0) + pi->dpm_table_start = tmp; + + ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, pi->sram_end); + + if (ret == 0) + pi->soft_regs_start = tmp; + + return ret; +} + +static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + pi->graphics_voltage_change_enable = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), + &pi->graphics_voltage_change_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_set_dpm_interval(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + pi->graphics_interval = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), + &pi->graphics_interval, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_set_dpm_boot_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), + &pi->graphics_boot_level, + sizeof(u8), pi->sram_end); + + return ret; +} + +static void kv_program_vc(struct radeon_device *rdev) +{ + WREG32_SMC(CG_FTV_0, 0x3FFFC000); +} + +static void kv_clear_vc(struct radeon_device *rdev) +{ + WREG32_SMC(CG_FTV_0, 0); +} + +static int kv_set_divider_value(struct radeon_device *rdev, + u32 index, u32 sclk) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct atom_clock_dividers dividers; + int ret; + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + sclk, false, &dividers); + if (ret) + return ret; + + pi->graphics_level[index].SclkDid = (u8)dividers.post_div; + pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); + + return 0; +} + +static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, + u16 voltage) +{ + return 6200 - (voltage * 25); +} + +static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, + u32 vid_2bit) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, + &pi->sys_info.vid_mapping_table, + vid_2bit); + + return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); +} + + +static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; + pi->graphics_level[index].MinVddNb = + cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid)); + + return 0; +} + +static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->graphics_level[index].AT = cpu_to_be16((u16)at); + + return 0; +} + +static void kv_dpm_power_level_enable(struct radeon_device *rdev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->graphics_level[index].EnabledForActivity = enable ?
1 : 0; +} + +static void kv_start_dpm(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(GENERAL_PWRMGT); + + tmp |= GLOBAL_PWRMGT_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); + + kv_smc_dpm_enable(rdev, true); +} + +static void kv_stop_dpm(struct radeon_device *rdev) +{ + kv_smc_dpm_enable(rdev, false); +} + +static void kv_start_am(struct radeon_device *rdev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); + sclk_pwrmgt_cntl |= DYNAMIC_PM_EN; + + WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static void kv_reset_am(struct radeon_device *rdev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT); + + WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze) +{ + return kv_notify_message_to_smu(rdev, freeze ? + PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); +} + +static int kv_force_lowest_valid(struct radeon_device *rdev) +{ + return kv_force_dpm_lowest(rdev); +} + +static int kv_unforce_levels(struct radeon_device *rdev) +{ + return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); +} + +static int kv_update_sclk_t(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 low_sclk_interrupt_t = 0; + int ret = 0; + + if (pi->caps_sclk_throttle_low_notification) { + low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), + (u8 *)&low_sclk_interrupt_t, + sizeof(u32), pi->sram_end); + } + return ret; +} + +static int kv_program_bootup_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { + if ((table->entries[i].clk == pi->boot_pl.sclk) || + (i == 0)) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(rdev, i, true); + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + if (table->num_max_dpm_entries == 0) + return -EINVAL; + + for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { + if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || + (i == 0)) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(rdev, i, true); + } + return 0; +} + +static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + pi->graphics_therm_throttle_enable = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), + &pi->graphics_therm_throttle_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_upload_dpm_settings(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), + (u8 *)&pi->graphics_level, + sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, + pi->sram_end); + + if (ret) + return ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), + &pi->graphics_dpm_level_count, + sizeof(u8), pi->sram_end); + + return 
ret; +} + +static u32 kv_get_clock_difference(u32 a, u32 b) +{ + return (a >= b) ? a - b : b - a; +} + +static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 value; + + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(clk, 40000) < 200) + value = 3; + else if (kv_get_clock_difference(clk, 30000) < 200) + value = 2; + else if (kv_get_clock_difference(clk, 20000) < 200) + value = 7; + else if (kv_get_clock_difference(clk, 15000) < 200) + value = 6; + else if (kv_get_clock_difference(clk, 10000) < 200) + value = 8; + else + value = 0; + } else { + value = 0; + } + + return value; +} + +static int kv_populate_uvd_table(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_uvd_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->uvd_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < table->entries[i].v)) + break; + + pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); + pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); + pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); + + pi->uvd_level[i].VClkBypassCntl = + (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk); + pi->uvd_level[i].DClkBypassCntl = + (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk); + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].vclk, false, &dividers); + if (ret) + return ret; + pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].dclk, false, &dividers); + if (ret) + return ret; + pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; + + pi->uvd_level_count++; + } + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), + (u8 *)&pi->uvd_level_count, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + pi->uvd_interval = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UVDInterval), + &pi->uvd_interval, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevel), + (u8 *)&pi->uvd_level, + sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, + pi->sram_end); + + return ret; + +} + +static int kv_populate_vce_table(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + u32 i; + struct radeon_vce_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + + if (table == NULL || table->count == 0) + return 0; + + pi->vce_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); + pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + pi->vce_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk); + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].evclk, false, &dividers); + if (ret) + return ret; + pi->vce_level[i].Divider = (u8)dividers.post_div; + + pi->vce_level_count++; + }
+ + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevelCount), + (u8 *)&pi->vce_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->vce_interval = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VCEInterval), + (u8 *)&pi->vce_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevel), + (u8 *)&pi->vce_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, + pi->sram_end); + + return ret; +} + +static int kv_populate_samu_table(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->samu_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + pi->samu_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(rdev, table->entries[i].clk); + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, &dividers); + if (ret) + return ret; + pi->samu_level[i].Divider = (u8)dividers.post_div; + + pi->samu_level_count++; + } + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), + (u8 *)&pi->samu_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->samu_interval = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SAMUInterval), + (u8 *)&pi->samu_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevel), + (u8 *)&pi->samu_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + + +static int kv_populate_acp_table(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->acp_level_count = 0; + for (i = 0; i < table->count; i++) { + pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, &dividers); + if (ret) + return ret; + pi->acp_level[i].Divider = (u8)dividers.post_div; + + pi->acp_level_count++; + } + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), + (u8 *)&pi->acp_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->acp_interval = 1; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, ACPInterval), + (u8 *)&pi->acp_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpLevel), + (u8
*)&pi->acp_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + +static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } +} + +static int kv_enable_ulv(struct radeon_device *rdev, bool enable) +{ + return kv_notify_message_to_smu(rdev, enable ? 
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); +} + +static void kv_update_current_ps(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->current_rps = *rps; + pi->current_ps = *new_ps; + pi->current_rps.ps_priv = &pi->current_ps; +} + +static void kv_update_requested_ps(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->requested_rps = *rps; + pi->requested_ps = *new_ps; + pi->requested_rps.ps_priv = &pi->requested_ps; +} + +int kv_dpm_enable(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + ret = kv_process_firmware_header(rdev); + if (ret) { + DRM_ERROR("kv_process_firmware_header failed\n"); + return ret; + } + kv_init_fps_limits(rdev); + kv_init_graphics_levels(rdev); + ret = kv_program_bootup_state(rdev); + if (ret) { + DRM_ERROR("kv_program_bootup_state failed\n"); + return ret; + } + kv_calculate_dfs_bypass_settings(rdev); + ret = kv_upload_dpm_settings(rdev); + if (ret) { + DRM_ERROR("kv_upload_dpm_settings failed\n"); + return ret; + } + ret = kv_populate_uvd_table(rdev); + if (ret) { + DRM_ERROR("kv_populate_uvd_table failed\n"); + return ret; + } + ret = kv_populate_vce_table(rdev); + if (ret) { + DRM_ERROR("kv_populate_vce_table failed\n"); + return ret; + } + ret = kv_populate_samu_table(rdev); + if (ret) { + DRM_ERROR("kv_populate_samu_table failed\n"); + return ret; + } + ret = kv_populate_acp_table(rdev); + if (ret) { + DRM_ERROR("kv_populate_acp_table failed\n"); + return ret; + } + kv_program_vc(rdev); +#if 0 + kv_initialize_hardware_cac_manager(rdev); +#endif + kv_start_am(rdev); + if (pi->enable_auto_thermal_throttling) { + ret = kv_enable_auto_thermal_throttling(rdev); + if (ret) { + DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + return ret; + } + } + ret = kv_enable_dpm_voltage_scaling(rdev); + if (ret) { + DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + return ret; + } + ret = kv_set_dpm_interval(rdev); + if (ret) { + DRM_ERROR("kv_set_dpm_interval failed\n"); + return ret; + } + ret = kv_set_dpm_boot_state(rdev); + if (ret) { + DRM_ERROR("kv_set_dpm_boot_state failed\n"); + return ret; + } + ret = kv_enable_ulv(rdev, true); + if (ret) { + DRM_ERROR("kv_enable_ulv failed\n"); + return ret; + } + kv_start_dpm(rdev); + ret = kv_enable_didt(rdev, true); + if (ret) { + DRM_ERROR("kv_enable_didt failed\n"); + return ret; + } + ret = kv_enable_smc_cac(rdev, true); + if (ret) { + DRM_ERROR("kv_enable_smc_cac failed\n"); + return ret; + } + + if (rdev->irq.installed && + r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { + ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) { + DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + return ret; + } + rdev->irq.dpm_thermal = true; + radeon_irq_set(rdev); + } + + /* powerdown unused blocks for now */ + kv_dpm_powergate_acp(rdev, true); + kv_dpm_powergate_samu(rdev, true); + kv_dpm_powergate_vce(rdev, true); + + kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); + + return ret; +} + +void kv_dpm_disable(struct radeon_device *rdev) +{ + kv_enable_smc_cac(rdev, false); + kv_enable_didt(rdev, false); + kv_clear_vc(rdev); + kv_stop_dpm(rdev); + kv_enable_ulv(rdev, false); + kv_reset_am(rdev); + + kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); +} + +#if 0 +static int kv_write_smc_soft_register(struct radeon_device 
*rdev, + u16 reg_offset, u32 value) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset, + (u8 *)&value, sizeof(u16), pi->sram_end); +} + +static int kv_read_smc_soft_register(struct radeon_device *rdev, + u16 reg_offset, u32 *value) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} +#endif + +static void kv_init_sclk_t(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->low_sclk_interrupt_t = 0; +} + +static int kv_init_fps_limits(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret = 0; + + if (pi->caps_fps) { + u16 tmp; + + tmp = 45; + pi->fps_high_t = cpu_to_be16(tmp); + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsHighT), + (u8 *)&pi->fps_high_t, + sizeof(u16), pi->sram_end); + + tmp = 30; + pi->fps_low_t = cpu_to_be16(tmp); + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsLowT), + (u8 *)&pi->fps_low_t, + sizeof(u16), pi->sram_end); + + } + return ret; +} + +static void kv_init_powergate_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->uvd_power_gated = false; + pi->vce_power_gated = false; + pi->samu_power_gated = false; + pi->acp_power_gated = false; + +} + +static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable) +{ + return kv_notify_message_to_smu(rdev, enable ? + PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); +} + +#if 0 +static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable) +{ + return kv_notify_message_to_smu(rdev, enable ? + PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); +} +#endif + +static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable) +{ + return kv_notify_message_to_smu(rdev, enable ? + PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); +} + +static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable) +{ + return kv_notify_message_to_smu(rdev, enable ? 
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); +} + +static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_uvd_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) + pi->uvd_boot_level = table->count - 1; + else + pi->uvd_boot_level = 0; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), + (uint8_t *)&pi->uvd_boot_level, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + if (!pi->caps_uvd_dpm || + pi->caps_stable_p_state) + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (1 << pi->uvd_boot_level)); + } + + return kv_enable_uvd_dpm(rdev, !gate); +} + +#if 0 +static u8 kv_get_vce_boot_level(struct radeon_device *rdev) +{ + u8 i; + struct radeon_vce_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].evclk >= 0) /* XXX */ + break; + } + + return i; +} + +static int kv_update_vce_dpm(struct radeon_device *rdev, + struct radeon_ps *radeon_new_state, + struct radeon_ps *radeon_current_state) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_vce_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + int ret; + + if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) { + if (pi->caps_stable_p_state) + pi->vce_boot_level = table->count - 1; + else + pi->vce_boot_level = kv_get_vce_boot_level(rdev); + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceBootLevel), + (u8 *)&pi->vce_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (1 << pi->vce_boot_level)); + + kv_enable_vce_dpm(rdev, true); + } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) { + kv_enable_vce_dpm(rdev, false); + } + + return 0; +} +#endif + +static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->samu_boot_level = table->count - 1; + else + pi->samu_boot_level = 0; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), + (u8 *)&pi->samu_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (1 << pi->samu_boot_level)); + } + + return kv_enable_samu_dpm(rdev, !gate); +} + +static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->acp_boot_level = table->count - 1; + else + pi->acp_boot_level = 0; + + ret = kv_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), + (u8 *)&pi->acp_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) 
+ return ret; + + if (pi->caps_stable_p_state) + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + + return kv_enable_acp_dpm(rdev, !gate); +} + +static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->uvd_power_gated == gate) + return; + + pi->uvd_power_gated = gate; + + if (gate) { + kv_update_uvd_dpm(rdev, true); + if (pi->caps_uvd_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF); + } else { + if (pi->caps_uvd_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON); + kv_update_uvd_dpm(rdev, false); + } +} + +static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->vce_power_gated == gate) + return; + + pi->vce_power_gated = gate; + + if (gate) { + if (pi->caps_vce_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON); + } +} + +static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->samu_power_gated == gate) + return; + + pi->samu_power_gated = gate; + + if (gate) { + kv_update_samu_dpm(rdev, true); + if (pi->caps_samu_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF); + } else { + if (pi->caps_samu_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON); + kv_update_samu_dpm(rdev, false); + } +} + +static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->acp_power_gated == gate) + return; + + if (rdev->family == CHIP_KABINI) + return; + + pi->acp_power_gated = gate; + + if (gate) { + kv_update_acp_dpm(rdev, true); + if (pi->caps_acp_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF); + } else { + if (pi->caps_acp_pg) + kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON); + kv_update_acp_dpm(rdev, false); + } +} + +static void kv_set_valid_clock_range(struct radeon_device *rdev, + struct radeon_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if ((table->entries[i].clk >= new_ps->levels[0].sclk) || + (i == (pi->graphics_dpm_level_count - 1))) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { + if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) || + (i == 0)) { + pi->highest_valid = i; + break; + } + } + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > + (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { + if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || + i == (int)(pi->graphics_dpm_level_count - 1)) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { + if (table->entries[i].sclk_frequency <= + new_ps->levels[new_ps->num_levels - 
1].sclk || + i == 0) { + pi->highest_valid = i; + break; + } + } + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - + table->entries[pi->highest_valid].sclk_frequency) > + (table->entries[pi->lowest_valid].sclk_frequency - + new_ps->levels[new_ps->num_levels -1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } +} + +static int kv_update_dfs_bypass_settings(struct radeon_device *rdev, + struct radeon_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(rdev); + int ret = 0; + u8 clk_bypass_cntl; + + if (pi->caps_enable_dfs_bypass) { + clk_bypass_cntl = new_ps->need_dfs_bypass ? + pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; + ret = kv_copy_bytes_to_smc(rdev, + (pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + + (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + + offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), + &clk_bypass_cntl, + sizeof(u8), pi->sram_end); + } + + return ret; +} + +static int kv_enable_nb_dpm(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret = 0; + + if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { + ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable); + if (ret == 0) + pi->nb_dpm_enabled = true; + } + + return ret; +} + +int kv_dpm_pre_set_power_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; + struct radeon_ps *new_ps = &requested_ps; + + kv_update_requested_ps(rdev, new_ps); + + kv_apply_state_adjust_rules(rdev, + &pi->requested_rps, + &pi->current_rps); + + return 0; +} + +int kv_dpm_set_power_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_ps *new_ps = &pi->requested_rps; + /*struct radeon_ps *old_ps = &pi->current_rps;*/ + int ret; + + if (rdev->family == CHIP_KABINI) { + if (pi->enable_dpm) { + kv_set_valid_clock_range(rdev, new_ps); + kv_update_dfs_bypass_settings(rdev, new_ps); + ret = kv_calculate_ds_divider(rdev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(rdev); + kv_calculate_dpm_settings(rdev); + kv_force_lowest_valid(rdev); + kv_enable_new_levels(rdev); + kv_upload_dpm_settings(rdev); + kv_program_nbps_index_settings(rdev, new_ps); + kv_unforce_levels(rdev); + kv_set_enabled_levels(rdev); + kv_force_lowest_valid(rdev); + kv_unforce_levels(rdev); +#if 0 + ret = kv_update_vce_dpm(rdev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } +#endif + kv_update_uvd_dpm(rdev, false); + kv_update_sclk_t(rdev); + } + } else { + if (pi->enable_dpm) { + kv_set_valid_clock_range(rdev, new_ps); + kv_update_dfs_bypass_settings(rdev, new_ps); + ret = kv_calculate_ds_divider(rdev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(rdev); + kv_calculate_dpm_settings(rdev); + kv_freeze_sclk_dpm(rdev, true); + kv_upload_dpm_settings(rdev); + kv_program_nbps_index_settings(rdev, new_ps); + kv_freeze_sclk_dpm(rdev, false); + kv_set_enabled_levels(rdev); +#if 0 + ret = kv_update_vce_dpm(rdev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } +#endif + kv_update_uvd_dpm(rdev, false); + kv_update_sclk_t(rdev); + kv_enable_nb_dpm(rdev); + } + } + return 0; +} + +void 
kv_dpm_post_set_power_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_ps *new_ps = &pi->requested_rps; + + kv_update_current_ps(rdev, new_ps); +} + +void kv_dpm_setup_asic(struct radeon_device *rdev) +{ + sumo_take_smu_control(rdev, true); + kv_init_powergate_state(rdev); + kv_init_sclk_t(rdev); +} + +void kv_dpm_reset_asic(struct radeon_device *rdev) +{ + kv_force_lowest_valid(rdev); + kv_init_graphics_levels(rdev); + kv_program_bootup_state(rdev); + kv_upload_dpm_settings(rdev); + kv_force_lowest_valid(rdev); + kv_unforce_levels(rdev); +} + +//XXX use sumo_dpm_display_configuration_changed + +static void kv_construct_max_power_limits_table(struct radeon_device *rdev, + struct radeon_clock_and_voltage_limits *table) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { + int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; + table->sclk = + pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; + table->vddc = + kv_convert_2bit_index_to_voltage(rdev, + pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); + } + + table->mclk = pi->sys_info.nbp_memory_clock[0]; +} + +static void kv_patch_voltage_values(struct radeon_device *rdev) +{ + int i; + struct radeon_uvd_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + + if (table->count) { + for (i = 0; i < table->count; i++) + table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + table->entries[i].v); + } + +} + +static void kv_construct_boot_state(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->boot_pl.sclk = pi->sys_info.bootup_sclk; + pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; + pi->boot_pl.ds_divider_index = 0; + pi->boot_pl.ss_divider_index = 0; + pi->boot_pl.allow_gnb_slow = 1; + pi->boot_pl.force_nbp_state = 0; + pi->boot_pl.display_wm = 0; + pi->boot_pl.vce_wm = 0; +} + +static int kv_force_dpm_lowest(struct radeon_device *rdev) +{ + int ret; + u32 enable_mask, i; + + ret = kv_dpm_get_enable_mask(rdev, &enable_mask); + if (ret) + return ret; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (enable_mask & (1 << i)) + break; + } + + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); +} + +static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, + u32 sclk, u32 min_sclk_in_sr) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + u32 temp; + u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? 
+ min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK; + + if (sclk < min) + return 0; + + if (!pi->caps_sclk_ds) + return 0; + + for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) { + temp = sclk / sumo_get_sleep_divider_from_id(i); + if ((temp >= min) || (i == 0)) + break; + } + + return (u8)i; +} + +static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + int i; + + if (table && table->count) { + for (i = table->count - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } + + *limit = 0; + return 0; +} + +static void kv_apply_state_adjust_rules(struct radeon_device *rdev, + struct radeon_ps *new_rps, + struct radeon_ps *old_rps) +{ + struct kv_ps *ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(rdev); + u32 min_sclk = 10000; /* ??? */ + u32 sclk, mclk = 0; + int i, limit; + bool force_high; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 stable_p_state_sclk = 0; + struct radeon_clock_and_voltage_limits *max_limits = + &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + mclk = max_limits->mclk; + sclk = min_sclk; + + if (pi->caps_stable_p_state) { + stable_p_state_sclk = (max_limits->sclk * 75) / 100; + + for (i = table->count - 1; i >= 0; i++) { + if (stable_p_state_sclk >= table->entries[i].clk) { + stable_p_state_sclk = table->entries[i].clk; + break; + } + } + + if (i > 0) + stable_p_state_sclk = table->entries[0].clk; + + sclk = stable_p_state_sclk; + } + + ps->need_dfs_bypass = true; + + for (i = 0; i < ps->num_levels; i++) { + if (ps->levels[i].sclk < sclk) + ps->levels[i].sclk = sclk; + } + + if (table && table->count) { + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(rdev, &limit); + ps->levels[i].sclk = table->entries[limit].clk; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(rdev, &limit); + ps->levels[i].sclk = table->entries[limit].sclk_frequency; + } + } + } + + if (pi->caps_stable_p_state) { + for (i = 0; i < ps->num_levels; i++) { + ps->levels[i].sclk = stable_p_state_sclk; + } + } + + pi->video_start = new_rps->dclk || new_rps->vclk; + + if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (rdev->family == CHIP_KABINI) { + ps->dpm0_pg_nb_ps_lo = 0x1; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x1; + ps->dpmx_nb_ps_hi = 0x0; + } else { + ps->dpm0_pg_nb_ps_lo = 0x1; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x2; + ps->dpmx_nb_ps_hi = 0x1; + + if 
(pi->sys_info.nb_dpm_enable && pi->battery_state) { + force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || + pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || + pi->disable_nb_ps3_in_battery; + ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpm0_pg_nb_ps_hi = 0x2; + ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpmx_nb_ps_hi = 0x2; + } + } +} + +static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; +} + +static int kv_calculate_ds_divider(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 sclk_in_sr = 10000; /* ??? */ + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].DeepSleepDivId = + kv_get_sleep_divider_id_from_clock(rdev, + be32_to_cpu(pi->graphics_level[i].SclkFrequency), + sclk_in_sr); + } + return 0; +} + +static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + bool force_high; + struct radeon_clock_and_voltage_limits *max_limits = + &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + u32 mclk = max_limits->mclk; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + if (rdev->family == CHIP_KABINI) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (!pi->sys_info.nb_dpm_enable) + return 0; + + force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || + (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); + + if (force_high) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].GnbSlow = 0; + } else { + if (pi->battery_state) + pi->graphics_level[0].ForceNbPs1 = 1; + + pi->graphics_level[1].GnbSlow = 0; + pi->graphics_level[2].GnbSlow = 0; + pi->graphics_level[3].GnbSlow = 0; + pi->graphics_level[4].GnbSlow = 0; + } + } else { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (pi->sys_info.nb_dpm_enable && pi->battery_state) { + pi->graphics_level[pi->lowest_valid].UpH = 0x28; + pi->graphics_level[pi->lowest_valid].GnbSlow = 0; + if (pi->lowest_valid != pi->highest_valid) + pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; + } + } + return 0; +} + +static int kv_calculate_dpm_settings(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 
1 : 0; + + return 0; +} + +static void kv_init_graphics_levels(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + u32 vid_2bit; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v))) + break; + + kv_set_divider_value(rdev, i, table->entries[i].clk); + vid_2bit = sumo_convert_vid7_to_vid2(rdev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); + kv_set_vid(rdev, i, vid_2bit); + kv_set_at(rdev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(rdev, i, true); + pi->graphics_dpm_level_count++; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->num_max_dpm_entries; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < + kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit)) + break; + + kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency); + kv_set_vid(rdev, i, table->entries[i].vid_2bit); + kv_set_at(rdev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(rdev, i, true); + pi->graphics_dpm_level_count++; + } + } + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + kv_dpm_power_level_enable(rdev, i, false); +} + +static void kv_enable_new_levels(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (i >= pi->lowest_valid && i <= pi->highest_valid) + kv_dpm_power_level_enable(rdev, i, true); + } +} + +static int kv_set_enabled_levels(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 i, new_mask = 0; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + new_mask |= (1 << i); + + return kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + +static void kv_program_nbps_index_settings(struct radeon_device *rdev, + struct radeon_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(rdev); + u32 nbdpmconfig1; + + if (rdev->family == CHIP_KABINI) + return; + + if (pi->sys_info.nb_dpm_enable) { + nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1); + nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK | + DpmXNbPsLo_MASK | DpmXNbPsHi_MASK); + nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) | + Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) | + DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) | + DpmXNbPsHi(new_ps->dpmx_nb_ps_hi)); + WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1); + } +} + +static int kv_set_thermal_temperature_range(struct radeon_device *rdev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + u32 tmp; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + tmp = RREG32_SMC(CG_THERMAL_INT_CTRL); + tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK); + tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) | + DIG_THERM_INTL(49 + (low_temp / 1000))); + WREG32_SMC(CG_THERMAL_INT_CTRL, tmp); + + rdev->pm.dpm.thermal.min_temp = low_temp; + rdev->pm.dpm.thermal.max_temp = high_temp; + + return 0; +} + +union igp_info { + 
struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; +}; + +static int kv_parse_sys_info_table(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + int i; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + + if (crev != 8) { + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + return -EINVAL; + } + pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); + pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); + pi->sys_info.bootup_nb_voltage_index = + le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); + if (igp_info->info_8.ucHtcTmpLmt == 0) + pi->sys_info.htc_tmp_lmt = 203; + else + pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; + if (igp_info->info_8.ucHtcHystLmt == 0) + pi->sys_info.htc_hyst_lmt = 5; + else + pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; + if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { + DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + } + + if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) + pi->sys_info.nb_dpm_enable = true; + else + pi->sys_info.nb_dpm_enable = false; + + for (i = 0; i < KV_NUM_NBPSTATES; i++) { + pi->sys_info.nbp_memory_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); + pi->sys_info.nbp_n_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); + } + if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & + SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + pi->caps_enable_dfs_bypass = true; + + sumo_construct_sclk_voltage_mapping_table(rdev, + &pi->sys_info.sclk_voltage_mapping_table, + igp_info->info_8.sAvail_SCLK); + + sumo_construct_vid_mapping_table(rdev, + &pi->sys_info.vid_mapping_table, + igp_info->info_8.sAvail_SCLK); + + kv_construct_max_power_limits_table(rdev, + &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + } + return 0; +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void kv_patch_boot_state(struct radeon_device *rdev, + struct kv_ps *ps) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + ps->num_levels = 1; + ps->levels[0] = pi->boot_pl; +} + +static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev, + struct radeon_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + struct kv_ps *ps = kv_get_ps(rps); + + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = 
le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + rdev->pm.dpm.boot_ps = rps; + kv_patch_boot_state(rdev, ps); + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + rdev->pm.dpm.uvd_ps = rps; +} + +static void kv_parse_pplib_clock_info(struct radeon_device *rdev, + struct radeon_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct kv_ps *ps = kv_get_ps(rps); + struct kv_pl *pl = &ps->levels[index]; + u32 sclk; + + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + pl->sclk = sclk; + pl->vddc_index = clock_info->sumo.vddcIndex; + + ps->num_levels = index + 1; + + if (pi->caps_sclk_ds) { + pl->ds_divider_index = 5; + pl->ss_divider_index = 5; + } +} + +static int kv_parse_power_table(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct kv_ps *ps; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * + state_array->ucNumEntries, GFP_KERNEL); + if (!rdev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); + rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); + rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); + for (i = 0; i < state_array->ucNumEntries; i++) { + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + if (!rdev->pm.power_state[i].clock_info) + return -EINVAL; + ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(rdev->pm.dpm.ps); + return -ENOMEM; + } + rdev->pm.dpm.ps[i].ps_priv = ps; + k = 0; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info 
*) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + kv_parse_pplib_clock_info(rdev, + &rdev->pm.dpm.ps[i], k, + clock_info); + k++; + } + kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + rdev->pm.dpm.num_ps = state_array->ucNumEntries; + return 0; +} + +int kv_dpm_init(struct radeon_device *rdev) +{ + struct kv_power_info *pi; + int ret, i; + + pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); + if (pi == NULL) + return -ENOMEM; + rdev->pm.dpm.priv = pi; + + ret = r600_parse_extended_power_table(rdev); + if (ret) + return ret; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) + pi->at[i] = TRINITY_AT_DFLT; + + pi->sram_end = SMC_RAM_END; + + if (rdev->family == CHIP_KABINI) + pi->high_voltage_t = 4001; + + pi->enable_nb_dpm = true; + + pi->caps_power_containment = true; + pi->caps_cac = true; + pi->enable_didt = false; + if (pi->enable_didt) { + pi->caps_sq_ramping = true; + pi->caps_db_ramping = true; + pi->caps_td_ramping = true; + pi->caps_tcp_ramping = true; + } + + pi->caps_sclk_ds = true; + pi->enable_auto_thermal_throttling = true; + pi->disable_nb_ps3_in_battery = false; + pi->bapm_enable = true; + pi->voltage_drop_t = 0; + pi->caps_sclk_throttle_low_notification = false; + pi->caps_fps = false; /* true? */ + pi->caps_uvd_pg = false; /* XXX */ + pi->caps_uvd_dpm = true; + pi->caps_vce_pg = false; + pi->caps_samu_pg = false; + pi->caps_acp_pg = false; + pi->caps_stable_p_state = false; + + ret = kv_parse_sys_info_table(rdev); + if (ret) + return ret; + + kv_patch_voltage_values(rdev); + kv_construct_boot_state(rdev); + + ret = kv_parse_power_table(rdev); + if (ret) + return ret; + + pi->enable_dpm = true; + + return 0; +} + +void kv_dpm_print_power_state(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + int i; + struct kv_ps *ps = kv_get_ps(rps); + + r600_dpm_print_class_info(rps->class, rps->class2); + r600_dpm_print_cap_info(rps->caps); + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->num_levels; i++) { + struct kv_pl *pl = &ps->levels[i]; + printk("\t\tpower level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index)); + } + r600_dpm_print_ps_status(rdev, rps); +} + +void kv_dpm_fini(struct radeon_device *rdev) +{ + int i; + + for (i = 0; i < rdev->pm.dpm.num_ps; i++) { + kfree(rdev->pm.dpm.ps[i].ps_priv); + } + kfree(rdev->pm.dpm.ps); + kfree(rdev->pm.dpm.priv); + r600_free_extended_power_table(rdev); +} + +void kv_dpm_display_configuration_changed(struct radeon_device *rdev) +{ + +} + +u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); + + if (low) + return requested_state->levels[0].sclk; + else + return requested_state->levels[requested_state->num_levels - 1].sclk; +} + +u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + return pi->sys_info.bootup_uma_clk; +} + diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h new file mode 100644 index 000000000000..32bb079572d7 --- /dev/null +++ b/drivers/gpu/drm/radeon/kv_dpm.h @@ -0,0 +1,199 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __KV_DPM_H__ +#define __KV_DPM_H__ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */ +#include "smu7_fusion.h" +#include "trinity_dpm.h" +#include "ppsmc.h" + +#define KV_NUM_NBPSTATES 4 + +enum kv_pt_config_reg_type { + KV_CONFIGREG_MMR = 0, + KV_CONFIGREG_SMC_IND, + KV_CONFIGREG_DIDT_IND, + KV_CONFIGREG_CACHE, + KV_CONFIGREG_MAX +}; + +struct kv_pt_config_reg { + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum kv_pt_config_reg_type type; +}; + +struct kv_lcac_config_values { + u32 block_id; + u32 signal_id; + u32 t; +}; + +struct kv_lcac_config_reg { + u32 cntl; + u32 block_mask; + u32 block_shift; + u32 signal_mask; + u32 signal_shift; + u32 t_mask; + u32 t_shift; + u32 enable_mask; + u32 enable_shift; +}; + +struct kv_pl { + u32 sclk; + u8 vddc_index; + u8 ds_divider_index; + u8 ss_divider_index; + u8 allow_gnb_slow; + u8 force_nbp_state; + u8 display_wm; + u8 vce_wm; +}; + +struct kv_ps { + struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 num_levels; + bool need_dfs_bypass; + u8 dpm0_pg_nb_ps_lo; + u8 dpm0_pg_nb_ps_hi; + u8 dpmx_nb_ps_lo; + u8 dpmx_nb_ps_hi; +}; + +struct kv_sys_info { + u32 bootup_uma_clk; + u32 bootup_sclk; + u32 dentist_vco_freq; + u32 nb_dpm_enable; + u32 nbp_memory_clock[KV_NUM_NBPSTATES]; + u32 nbp_n_clock[KV_NUM_NBPSTATES]; + u16 bootup_nb_voltage_index; + u8 htc_tmp_lmt; + u8 htc_hyst_lmt; + struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table; + struct sumo_vid_mapping_table vid_mapping_table; + u32 uma_channel_number; +}; + +struct kv_power_info { + u32 at[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 voltage_drop_t; + struct kv_sys_info sys_info; + struct kv_pl boot_pl; + bool enable_nb_ps_policy; + bool disable_nb_ps3_in_battery; + bool video_start; + bool battery_state; + u32 lowest_valid; + u32 highest_valid; + u16 high_voltage_t; + bool cac_enabled; + bool bapm_enable; + /* smc offsets */ + u32 sram_end; + u32 dpm_table_start; + u32 soft_regs_start; + /* dpm SMU tables */ + u8 graphics_dpm_level_count; + u8 uvd_level_count; + u8 vce_level_count; + u8 acp_level_count; + u8 samu_level_count; + u16 fps_high_t; + SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE]; + SMU7_Fusion_ACPILevel acpi_level; + SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD]; + SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE]; + 
SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP]; + SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU]; + u8 uvd_boot_level; + u8 vce_boot_level; + u8 acp_boot_level; + u8 samu_boot_level; + u8 uvd_interval; + u8 vce_interval; + u8 acp_interval; + u8 samu_interval; + u8 graphics_boot_level; + u8 graphics_interval; + u8 graphics_therm_throttle_enable; + u8 graphics_voltage_change_enable; + u8 graphics_clk_slow_enable; + u8 graphics_clk_slow_divider; + u8 fps_low_t; + u32 low_sclk_interrupt_t; + bool uvd_power_gated; + bool vce_power_gated; + bool acp_power_gated; + bool samu_power_gated; + bool nb_dpm_enabled; + /* flags */ + bool enable_didt; + bool enable_dpm; + bool enable_auto_thermal_throttling; + bool enable_nb_dpm; + /* caps */ + bool caps_cac; + bool caps_power_containment; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_sclk_throttle_low_notification; + bool caps_fps; + bool caps_uvd_dpm; + bool caps_uvd_pg; + bool caps_vce_pg; + bool caps_samu_pg; + bool caps_acp_pg; + bool caps_stable_p_state; + bool caps_enable_dfs_bypass; + bool caps_sclk_ds; + struct radeon_ps current_rps; + struct kv_ps current_ps; + struct radeon_ps requested_rps; + struct kv_ps requested_ps; +}; + + +/* kv_smc.c */ +int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id); +int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask); +int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev, + PPSMC_Msg msg, u32 parameter); +int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, + u32 *value, u32 limit); +int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); +int kv_copy_bytes_to_smc(struct radeon_device *rdev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); + +#endif diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c new file mode 100644 index 000000000000..34a226d7e34a --- /dev/null +++ b/drivers/gpu/drm/radeon/kv_smc.c @@ -0,0 +1,207 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include "drmP.h" +#include "radeon.h" +#include "cikd.h" +#include "kv_dpm.h" + +int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id) +{ + u32 i; + u32 tmp = 0; + + WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK); + + for (i = 0; i < rdev->usec_timeout; i++) { + if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0) + break; + udelay(1); + } + tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK; + + if (tmp != 1) { + if (tmp == 0xFF) + return -EINVAL; + else if (tmp == 0xFE) + return -EINVAL; + } + + return 0; +} + +int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask) +{ + int ret; + + ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask); + + if (ret == 0) + *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0); + + return ret; +} + +int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev, + PPSMC_Msg msg, u32 parameter) +{ + + WREG32(SMC_MSG_ARG_0, parameter); + + return kv_notify_message_to_smu(rdev, msg); +} + +static int kv_set_smc_sram_address(struct radeon_device *rdev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(SMC_IND_INDEX_0, smc_address); + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + + return 0; +} + +int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, + u32 *value, u32 limit) +{ + int ret; + + ret = kv_set_smc_sram_address(rdev, smc_address, limit); + if (ret) + return ret; + + *value = RREG32(SMC_IND_DATA_0); + return 0; +} + +int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable) +{ + if (enable) + return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable); + else + return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); +} + +int kv_copy_bytes_to_smc(struct radeon_device *rdev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + int ret; + u32 data, original_data, addr, extra_shift, t_byte, count, mask; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + t_byte = addr & 3; + + /* RMW for the initial bytes */ + if (t_byte != 0) { + addr -= t_byte; + + ret = kv_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(SMC_IND_DATA_0); + + data = 0; + mask = 0; + count = 4; + while (count > 0) { + if (t_byte > 0) { + mask = (mask << 8) | 0xff; + t_byte--; + } else if (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + mask <<= 8; + } else { + data <<= 8; + mask = (mask << 8) | 0xff; + } + count--; + } + + data |= original_data & mask; + + ret = kv_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + WREG32(SMC_IND_DATA_0, data); + + addr += 4; + } + + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + ret = kv_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = kv_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + original_data= RREG32(SMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* SMC address space is BE */ + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = kv_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + 
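
kv_copy_bytes_to_smc() above has to cope with the SMC's big-endian, dword-addressed SRAM: unaligned head and tail bytes are merged into the existing dword with a read-modify-write, while each aligned group of four source bytes is packed MSB-first. A small host-side sketch of the aligned packing step (an illustrative helper, not part of the patch):

#include <stdint.h>

/* Pack four consecutive bytes MSB-first, matching the "SMC address space
 * is BE" comment in kv_copy_bytes_to_smc(). */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) |
	       ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |
	        (uint32_t)src[3];
}

/* For a trailing partial word of n bytes the driver shifts the packed bytes
 * into the top of the dword and keeps the untouched low bytes of the value
 * already in SRAM:
 *   data = (packed << 8 * (4 - n)) | (original & ~(~0u << 8 * (4 - n)))
 */
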
WREG32(SMC_IND_DATA_0, data); + } + return 0; +} + diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h index b5564a3645d2..6db6e320bc79 100644 --- a/drivers/gpu/drm/radeon/ppsmc.h +++ b/drivers/gpu/drm/radeon/ppsmc.h @@ -99,11 +99,45 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +/* KV/KB */ +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) + /* TN */ #define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) #define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) #define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) #define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) +#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3a55540fe280..9c83ecfd0eb7 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2610,6 +2610,20 @@ static struct radeon_asic kv_asic = { .set_uvd_clocks = &cik_set_uvd_clocks, .get_temperature = &kv_get_temp, }, + .dpm = { + .init = &kv_dpm_init, + .setup_asic = &kv_dpm_setup_asic, + .enable = &kv_dpm_enable, + .disable = &kv_dpm_disable, + .pre_set_power_state = &kv_dpm_pre_set_power_state, + .set_power_state = &kv_dpm_set_power_state, + .post_set_power_state = &kv_dpm_post_set_power_state, + .display_configuration_changed = &kv_dpm_display_configuration_changed, + .fini = &kv_dpm_fini, + .get_sclk = &kv_dpm_get_sclk, + .get_mclk = &kv_dpm_get_mclk, + .print_power_state = &kv_dpm_print_power_state, + }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h 
b/drivers/gpu/drm/radeon/radeon_asic.h index d5c6c5b10edf..68a1a1fb371d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -750,4 +750,18 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev, int ci_get_temp(struct radeon_device *rdev); int kv_get_temp(struct radeon_device *rdev); +int kv_dpm_init(struct radeon_device *rdev); +int kv_dpm_enable(struct radeon_device *rdev); +void kv_dpm_disable(struct radeon_device *rdev); +int kv_dpm_pre_set_power_state(struct radeon_device *rdev); +int kv_dpm_set_power_state(struct radeon_device *rdev); +void kv_dpm_post_set_power_state(struct radeon_device *rdev); +void kv_dpm_setup_asic(struct radeon_device *rdev); +void kv_dpm_display_configuration_changed(struct radeon_device *rdev); +void kv_dpm_fini(struct radeon_device *rdev); +u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low); +u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low); +void kv_dpm_print_power_state(struct radeon_device *rdev, + struct radeon_ps *ps); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 1408014dce8f..37d3d343f687 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1202,6 +1202,8 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_VERDE: case CHIP_OLAND: case CHIP_HAINAN: + case CHIP_KABINI: + case CHIP_KAVERI: /* DPM requires the RLC, RV770+ dGPU requires SMC */ if (!rdev->rlc_fw) rdev->pm.pm_method = PM_METHOD_PROFILE; diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h new file mode 100644 index 000000000000..75a380a15292 --- /dev/null +++ b/drivers/gpu/drm/radeon/smu7.h @@ -0,0 +1,170 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU7_H +#define SMU7_H + +#pragma pack(push, 1) + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU7_MAX_LEVELS_VDDC 8 +#define SMU7_MAX_LEVELS_VDDCI 4 +#define SMU7_MAX_LEVELS_MVDD 4 +#define SMU7_MAX_LEVELS_VDDNB 8 + +#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV +#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM +#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels +#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes. 
+#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD. +#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE. +#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP. +#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU. +#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table. + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< Date: Wed, 14 Aug 2013 01:03:41 -0400 Subject: drm/radeon: add dpm support for CI dGPUs (v2) This adds dpm support for btc asics. This includes: - dynamic engine clock scaling - dynamic memory clock scaling - dynamic voltage scaling - dynamic pcie gen switching Set radeon.dpm=1 to enable. v2: remove unused radeon_atombios.c changes, make missing smc ucode non-fatal Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 3 +- drivers/gpu/drm/radeon/ci_dpm.c | 5006 ++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/ci_dpm.h | 331 ++ drivers/gpu/drm/radeon/ci_smc.c | 262 ++ drivers/gpu/drm/radeon/cik.c | 41 +- drivers/gpu/drm/radeon/cikd.h | 259 +- drivers/gpu/drm/radeon/ppsmc.h | 24 +- drivers/gpu/drm/radeon/radeon.h | 6 + drivers/gpu/drm/radeon/radeon_asic.c | 14 + drivers/gpu/drm/radeon/radeon_asic.h | 14 + drivers/gpu/drm/radeon/radeon_atombios.c | 2 +- drivers/gpu/drm/radeon/radeon_pm.c | 1 + drivers/gpu/drm/radeon/radeon_ucode.h | 3 + drivers/gpu/drm/radeon/si_dpm.c | 10 +- drivers/gpu/drm/radeon/smu7_discrete.h | 486 +++ 15 files changed, 6447 insertions(+), 15 deletions(-) create mode 100644 drivers/gpu/drm/radeon/ci_dpm.c create mode 100644 drivers/gpu/drm/radeon/ci_dpm.h create mode 100644 drivers/gpu/drm/radeon/ci_smc.c create mode 100644 drivers/gpu/drm/radeon/smu7_discrete.h diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index d3265b5d4661..ea913cc681b4 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -79,7 +79,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ - trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o + trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ + ci_dpm.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c new file mode 100644 index 000000000000..72ab92b60e6e --- /dev/null +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -0,0 +1,5006 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
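
The changelog above notes that the new CI DPM paths are opt-in via radeon.dpm=1. Assuming the usual ways of passing a radeon module parameter, that looks like:

# on the kernel command line (radeon built in or as a module)
radeon.dpm=1

# or when loading radeon as a module
modprobe radeon dpm=1
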
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "radeon.h" +#include "cikd.h" +#include "r600_dpm.h" +#include "ci_dpm.h" +#include "atom.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x40000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +static const struct ci_pt_defaults defaults_bonaire_xt = +{ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +static const struct ci_pt_defaults defaults_bonaire_pro = +{ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, + { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, + { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } +}; + +static const struct ci_pt_defaults defaults_saturn_xt = +{ + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, + { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, + { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_saturn_pro = +{ + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, + { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, + { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } +}; + +static const struct ci_pt_config_reg didt_config_ci[] = +{ + { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0xff000000, 24, 0x0, 
CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + 
{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + +extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); +extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, + u32 arb_freq_src, u32 arb_freq_dest); +extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); +extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode); +extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, + u32 max_voltage_steps, + struct atom_voltage_table *voltage_table); +extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); +extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); + +static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, + struct atom_voltage_table_entry *voltage_table, + u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); +static int ci_set_power_limit(struct radeon_device *rdev, u32 n); +static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, + u32 target_tdp); +static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); + +static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) +{ + struct ci_power_info *pi = rdev->pm.dpm.priv; + + return pi; +} + +static struct ci_ps *ci_get_ps(struct radeon_ps *rps) +{ + struct ci_ps *ps = rps->ps_priv; + + return ps; +} + +static void ci_initialize_powertune_defaults(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + switch (rdev->pdev->device) { + case 0x6650: + case 0x6658: + case 0x665C: + default: + pi->powertune_defaults = &defaults_bonaire_xt; + break; + case 0x6651: + case 0x665D: + pi->powertune_defaults = &defaults_bonaire_pro; + break; + case 0x6640: + pi->powertune_defaults = &defaults_saturn_xt; + break; + case 0x6641: + pi->powertune_defaults = &defaults_saturn_pro; + break; + } + + pi->dte_tj_offset = 0; + + pi->caps_power_containment = true; + pi->caps_cac = false; + pi->caps_sq_ramping = false; + pi->caps_db_ramping = false; + pi->caps_td_ramping = false; + pi->caps_tcp_ramping = false; + + if (pi->caps_power_containment) { + pi->caps_cac = true; + pi->enable_bapm_feature = true; + pi->enable_tdc_limit_feature = true; + pi->enable_pkg_pwr_tracking_feature = true; + } +} + +static u8 ci_convert_to_vid(u16 vddc) +{ + return (6200 - (vddc * VOLTAGE_SCALE)) / 25; +} + +static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; + u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; + u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; + u32 i; + + if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) + return -EINVAL; + if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8) + return -EINVAL; + if (rdev->pm.dpm.dyn_state.cac_leakage_table.count != + rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) + return -EINVAL; + + for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); + hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); + hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); + } else { + lo_vid[i] = 
ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); + hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); + } + } + return 0; +} + +static int ci_populate_vddc_vid(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u8 *vid = pi->smc_powertune_table.VddCVid; + u32 i; + + if (pi->vddc_voltage_table.count > 8) + return -EINVAL; + + for (i = 0; i < pi->vddc_voltage_table.count; i++) + vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); + + return 0; +} + +static int ci_populate_svi_load_line(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + + pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; + pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; + pi->smc_powertune_table.SviLoadLineTrimVddC = 3; + pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int ci_populate_tdc_limit(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + u16 tdc_limit; + + tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; + pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); + pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + pt_defaults->tdc_vddc_throttle_release_limit_perc; + pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; + + return 0; +} + +static int ci_populate_dw8(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + int ret; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable) + + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), + (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, + pi->sram_end); + if (ret) + return -EINVAL; + else + pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; + + return 0; +} + +static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; + u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; + int i, min, max; + + min = max = hi_vid[0]; + for (i = 0; i < 8; i++) { + if (0 != hi_vid[i]) { + if (min > hi_vid[i]) + min = hi_vid[i]; + if (max < hi_vid[i]) + max = hi_vid[i]; + } + + if (0 != lo_vid[i]) { + if (min > lo_vid[i]) + min = lo_vid[i]; + if (max < lo_vid[i]) + max = lo_vid[i]; + } + } + + if ((min == 0) || (max == 0)) + return -EINVAL; + pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; + pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; + + return 0; +} + +static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; + u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; + struct radeon_cac_tdp_table *cac_tdp_table = + rdev->pm.dpm.dyn_state.cac_tdp_table; + + hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; + lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; + + pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); + pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); + + return 0; +} + +static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev) 
+{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; + struct radeon_cac_tdp_table *cac_tdp_table = + rdev->pm.dpm.dyn_state.cac_tdp_table; + struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table; + int i, j, k; + const u16 *def1; + const u16 *def2; + + dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; + dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; + + dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; + dpm_table->GpuTjMax = + (u8)(pi->thermal_temp_setting.temperature_high / 1000); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; + + if (ppm) { + dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); + dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); + } else { + dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); + dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); + } + + dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); + def1 = pt_defaults->bapmti_r; + def2 = pt_defaults->bapmti_rc; + + for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU7_DTE_SOURCES; j++) { + for (k = 0; k < SMU7_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); + dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int ci_populate_pm_base(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 pm_fuse_table_offset; + int ret; + + if (pi->caps_power_containment) { + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, pi->sram_end); + if (ret) + return ret; + ret = ci_populate_bapm_vddc_vid_sidd(rdev); + if (ret) + return ret; + ret = ci_populate_vddc_vid(rdev); + if (ret) + return ret; + ret = ci_populate_svi_load_line(rdev); + if (ret) + return ret; + ret = ci_populate_tdc_limit(rdev); + if (ret) + return ret; + ret = ci_populate_dw8(rdev); + if (ret) + return ret; + ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); + if (ret) + return ret; + ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev); + if (ret) + return ret; + ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset, + (u8 *)&pi->smc_powertune_table, + sizeof(SMU7_Discrete_PmFuses), pi->sram_end); + if (ret) + return ret; + } + + return 0; +} + +static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 data; + + if (pi->caps_sq_ramping) { + data = RREG32_DIDT(DIDT_SQ_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_SQ_CTRL0, data); + } + + if (pi->caps_db_ramping) { + data = RREG32_DIDT(DIDT_DB_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_DB_CTRL0, data); + } + + if (pi->caps_td_ramping) { + data = RREG32_DIDT(DIDT_TD_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_TD_CTRL0, data); + } + + if (pi->caps_tcp_ramping) { + data = RREG32_DIDT(DIDT_TCP_CTRL0); + if (enable) + data |= DIDT_CTRL_EN; + else + data &= ~DIDT_CTRL_EN; + WREG32_DIDT(DIDT_TCP_CTRL0, data); + } +} + +static int ci_program_pt_config_registers(struct radeon_device *rdev, + const struct ci_pt_config_reg *cac_config_regs) +{ + const struct ci_pt_config_reg *config_regs = cac_config_regs; + u32 data; + u32 
cache = 0; + + if (config_regs == NULL) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + } else { + switch (config_regs->type) { + case CISLANDS_CONFIGREG_SMC_IND: + data = RREG32_SMC(config_regs->offset); + break; + case CISLANDS_CONFIGREG_DIDT_IND: + data = RREG32_DIDT(config_regs->offset); + break; + default: + data = RREG32(config_regs->offset << 2); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case CISLANDS_CONFIGREG_SMC_IND: + WREG32_SMC(config_regs->offset, data); + break; + case CISLANDS_CONFIGREG_DIDT_IND: + WREG32_DIDT(config_regs->offset, data); + break; + default: + WREG32(config_regs->offset << 2, data); + break; + } + cache = 0; + } + config_regs++; + } + return 0; +} + +static int ci_enable_didt(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + + if (pi->caps_sq_ramping || pi->caps_db_ramping || + pi->caps_td_ramping || pi->caps_tcp_ramping) { + cik_enter_rlc_safe_mode(rdev); + + if (enable) { + ret = ci_program_pt_config_registers(rdev, didt_config_ci); + if (ret) { + cik_exit_rlc_safe_mode(rdev); + return ret; + } + } + + ci_do_enable_didt(rdev, enable); + + cik_exit_rlc_safe_mode(rdev); + } + + return 0; +} + +static int ci_enable_power_containment(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + int ret = 0; + + if (enable) { + pi->power_containment_features = 0; + if (pi->caps_power_containment) { + if (pi->enable_bapm_feature) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + else + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; + } + + if (pi->enable_tdc_limit_feature) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + else + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (pi->enable_pkg_pwr_tracking_feature) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + } else { + struct radeon_cac_tdp_table *cac_tdp_table = + rdev->pm.dpm.dyn_state.cac_tdp_table; + u32 default_pwr_limit = + (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); + + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + ci_set_power_limit(rdev, default_pwr_limit); + } + } + } + } else { + if (pi->caps_power_containment && pi->power_containment_features) { + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) + ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable); + + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) + ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE); + + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) + ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable); + pi->power_containment_features = 0; + } + } + + return ret; +} + +static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + int ret = 0; + + if (pi->caps_cac) { + if (enable) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac); + if (smc_result != PPSMC_Result_OK) { + ret = 
-EINVAL; + pi->cac_enabled = false; + } else { + pi->cac_enabled = true; + } + } else if (pi->cac_enabled) { + ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); + pi->cac_enabled = false; + } + } + + return ret; +} + +static int ci_power_control_set_level(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_cac_tdp_table *cac_tdp_table = + rdev->pm.dpm.dyn_state.cac_tdp_table; + s32 adjust_percent; + s32 target_tdp; + int ret = 0; + bool adjust_polarity = false; /* ??? */ + + if (pi->caps_power_containment && + (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) { + adjust_percent = adjust_polarity ? + rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); + target_tdp = ((100 + adjust_percent) * + (s32)cac_tdp_table->configurable_tdp) / 100; + target_tdp *= 256; + + ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); + } + + return ret; +} + +static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) +{ + ci_update_uvd_dpm(rdev, gate); +} + +static void ci_apply_state_adjust_rules(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct ci_ps *ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_clock_and_voltage_limits *max_limits; + bool disable_mclk_switching; + u32 sclk, mclk; + int i; + + if (rdev->pm.dpm.new_active_crtc_count > 1) + disable_mclk_switching = true; + else + disable_mclk_switching = false; + + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (rdev->pm.dpm.ac_power) + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (rdev->pm.dpm.ac_power == false) { + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk > max_limits->mclk) + ps->performance_levels[i].mclk = max_limits->mclk; + if (ps->performance_levels[i].sclk > max_limits->sclk) + ps->performance_levels[i].sclk = max_limits->sclk; + } + } + + /* XXX validate the min clocks required for display */ + + if (disable_mclk_switching) { + mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; + sclk = ps->performance_levels[0].sclk; + } else { + mclk = ps->performance_levels[0].mclk; + sclk = ps->performance_levels[0].sclk; + } + + ps->performance_levels[0].sclk = sclk; + ps->performance_levels[0].mclk = mclk; + + if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) + ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; + + if (disable_mclk_switching) { + if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) + ps->performance_levels[0].mclk = ps->performance_levels[1].mclk; + } else { + if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) + ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; + } +} + +static int ci_set_thermal_temperature_range(struct radeon_device *rdev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + u32 tmp; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + tmp = RREG32_SMC(CG_THERMAL_INT); + tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK); + tmp |= CI_DIG_THERM_INTH(high_temp / 1000) | + CI_DIG_THERM_INTL(low_temp / 1000); + 
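
ci_power_control_set_level() above scales the board's configurable TDP by the requested percentage and converts the result into the SMC's fixed-point format (units of 1/256). A short worked example mirroring that arithmetic, with assumed numbers (an 80 W configurable TDP and a +10% adjustment):

#include <stdint.h>

/* Mirror of the scaling in ci_power_control_set_level(): adjust the
 * configurable TDP by a signed percentage, then express it in 1/256 units.
 * Example: configurable_tdp = 80, adjust_percent = 10
 *   -> ((100 + 10) * 80) / 100 = 88, 88 * 256 = 22528,
 * which is what gets passed to ci_set_overdrive_target_tdp(). */
static int32_t scaled_target_tdp(int32_t configurable_tdp, int32_t adjust_percent)
{
	int32_t target = ((100 + adjust_percent) * configurable_tdp) / 100;

	return target * 256;
}
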
WREG32_SMC(CG_THERMAL_INT, tmp); + +#if 0 + /* XXX: need to figure out how to handle this properly */ + tmp = RREG32_SMC(CG_THERMAL_CTRL); + tmp &= DIG_THERM_DPM_MASK; + tmp |= DIG_THERM_DPM(high_temp / 1000); + WREG32_SMC(CG_THERMAL_CTRL, tmp); +#endif + + return 0; +} + +#if 0 +static int ci_read_smc_soft_register(struct radeon_device *rdev, + u16 reg_offset, u32 *value) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + return ci_read_smc_sram_dword(rdev, + pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} +#endif + +static int ci_write_smc_soft_register(struct radeon_device *rdev, + u16 reg_offset, u32 value) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + return ci_write_smc_sram_dword(rdev, + pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} + +static void ci_init_fps_limits(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + SMU7_Discrete_DpmTable *table = &pi->smc_state_table; + + if (pi->caps_fps) { + u16 tmp; + + tmp = 45; + table->FpsHighT = cpu_to_be16(tmp); + + tmp = 30; + table->FpsLowT = cpu_to_be16(tmp); + } +} + +static int ci_update_sclk_t(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret = 0; + u32 low_sclk_interrupt_t = 0; + + if (pi->caps_sclk_throttle_low_notification) { + low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); + + ret = ci_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), + (u8 *)&low_sclk_interrupt_t, + sizeof(u32), pi->sram_end); + + } + + return ret; +} + +static void ci_get_leakage_voltages(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u16 leakage_id, virtual_voltage_id; + u16 vddc, vddci; + int i; + + pi->vddc_leakage.count = 0; + pi->vddci_leakage.count = 0; + + if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, + virtual_voltage_id, + leakage_id) == 0) { + if (vddc != 0 && vddc != virtual_voltage_id) { + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; + pi->vddc_leakage.count++; + } + if (vddci != 0 && vddci != virtual_voltage_id) { + pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; + pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; + pi->vddci_leakage.count++; + } + } + } + } +} + +static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + bool want_thermal_protection; + enum radeon_dpm_event_src dpm_event_src; + u32 tmp; + + switch (sources) { + case 0: + default: + want_thermal_protection = false; + break; + case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): + want_thermal_protection = true; + dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; + break; + case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): + want_thermal_protection = true; + dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; + break; + case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | + (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): + want_thermal_protection = true; + dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; + break; + } + + if (want_thermal_protection) { +#if 0 + /* XXX: need to figure out how to handle this properly */ + tmp = RREG32_SMC(CG_THERMAL_CTRL); + tmp &= 
DPM_EVENT_SRC_MASK; + tmp |= DPM_EVENT_SRC(dpm_event_src); + WREG32_SMC(CG_THERMAL_CTRL, tmp); +#endif + + tmp = RREG32_SMC(GENERAL_PWRMGT); + if (pi->thermal_protection) + tmp &= ~THERMAL_PROTECTION_DIS; + else + tmp |= THERMAL_PROTECTION_DIS; + WREG32_SMC(GENERAL_PWRMGT, tmp); + } else { + tmp = RREG32_SMC(GENERAL_PWRMGT); + tmp |= THERMAL_PROTECTION_DIS; + WREG32_SMC(GENERAL_PWRMGT, tmp); + } +} + +static void ci_enable_auto_throttle_source(struct radeon_device *rdev, + enum radeon_dpm_auto_throttle_src source, + bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (enable) { + if (!(pi->active_auto_throttle_sources & (1 << source))) { + pi->active_auto_throttle_sources |= 1 << source; + ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); + } + } else { + if (pi->active_auto_throttle_sources & (1 << source)) { + pi->active_auto_throttle_sources &= ~(1 << source); + ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); + } + } +} + +static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev) +{ + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) + ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt); +} + +static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if ((!pi->sclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if ((!pi->mclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + pi->need_update_smu7_dpm_table = 0; + return 0; +} + +static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + + if (enable) { + if (!pi->sclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if (!pi->mclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN); + + WREG32_SMC(LCAC_MC0_CNTL, 0x05); + WREG32_SMC(LCAC_MC1_CNTL, 0x05); + WREG32_SMC(LCAC_CPL_CNTL, 0x100005); + + udelay(10); + + WREG32_SMC(LCAC_MC0_CNTL, 0x400005); + WREG32_SMC(LCAC_MC1_CNTL, 0x400005); + WREG32_SMC(LCAC_CPL_CNTL, 0x500005); + } + } else { + if (!pi->sclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if (!pi->mclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + } + + return 0; +} + +static int ci_start_dpm(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + int ret; + u32 tmp; + + tmp = RREG32_SMC(GENERAL_PWRMGT); + tmp |= GLOBAL_PWRMGT_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); + + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); + tmp |= DYNAMIC_PM_EN; + WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); + + ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); + + 
WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN); + + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + ret = ci_enable_sclk_mclk_dpm(rdev, true); + if (ret) + return ret; + + if (!pi->pcie_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if ((!pi->sclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if ((!pi->mclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_stop_dpm(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + int ret; + u32 tmp; + + tmp = RREG32_SMC(GENERAL_PWRMGT); + tmp &= ~GLOBAL_PWRMGT_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); + + tmp = RREG32(SCLK_PWRMGT_CNTL); + tmp &= ~DYNAMIC_PM_EN; + WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); + + if (!pi->pcie_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + ret = ci_enable_sclk_mclk_dpm(rdev, false); + if (ret) + return ret; + + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + return 0; +} + +static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable) +{ + u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); + + if (enable) + tmp &= ~SCLK_PWRMGT_OFF; + else + tmp |= SCLK_PWRMGT_OFF; + WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); +} + +#if 0 +static int ci_notify_hw_of_power_source(struct radeon_device *rdev, + bool ac_power) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_cac_tdp_table *cac_tdp_table = + rdev->pm.dpm.dyn_state.cac_tdp_table; + u32 power_limit; + + if (ac_power) + power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); + else + power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); + + ci_set_power_limit(rdev, power_limit); + + if (pi->caps_automatic_dc_transition) { + if (ac_power) + ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC); + else + ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp); + } + + return 0; +} +#endif + +static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, + PPSMC_Msg msg, u32 parameter) +{ + WREG32(SMC_MSG_ARG_0, parameter); + return ci_send_msg_to_smc(rdev, msg); +} + +static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev, + PPSMC_Msg msg, u32 *parameter) +{ + PPSMC_Result smc_result; + + smc_result = ci_send_msg_to_smc(rdev, msg); + + if ((smc_result == PPSMC_Result_OK) && parameter) + *parameter = RREG32(SMC_MSG_ARG_0); + + return smc_result; +} + +static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (!pi->sclk_dpm_key_disabled) { + PPSMC_Result smc_result = + ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n); + if 
(smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (!pi->mclk_dpm_key_disabled) { + PPSMC_Result smc_result = + ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (!pi->pcie_dpm_key_disabled) { + PPSMC_Result smc_result = + ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_set_power_limit(struct radeon_device *rdev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + PPSMC_Result smc_result = + ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, + u32 target_tdp) +{ + PPSMC_Result smc_result = + ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + return 0; +} + +static int ci_set_boot_state(struct radeon_device *rdev) +{ + return ci_enable_sclk_mclk_dpm(rdev, false); +} + +static u32 ci_get_average_sclk_freq(struct radeon_device *rdev) +{ + u32 sclk_freq; + PPSMC_Result smc_result = + ci_send_msg_to_smc_return_parameter(rdev, + PPSMC_MSG_API_GetSclkFrequency, + &sclk_freq); + if (smc_result != PPSMC_Result_OK) + sclk_freq = 0; + + return sclk_freq; +} + +static u32 ci_get_average_mclk_freq(struct radeon_device *rdev) +{ + u32 mclk_freq; + PPSMC_Result smc_result = + ci_send_msg_to_smc_return_parameter(rdev, + PPSMC_MSG_API_GetMclkFrequency, + &mclk_freq); + if (smc_result != PPSMC_Result_OK) + mclk_freq = 0; + + return mclk_freq; +} + +static void ci_dpm_start_smc(struct radeon_device *rdev) +{ + int i; + + ci_program_jump_on_start(rdev); + ci_start_smc_clock(rdev); + ci_start_smc(rdev); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED) + break; + } +} + +static void ci_dpm_stop_smc(struct radeon_device *rdev) +{ + ci_reset_smc(rdev); + ci_stop_smc_clock(rdev); +} + +static int ci_process_firmware_header(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + int ret; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->dpm_table_start = tmp; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->soft_regs_start = tmp; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcRegisterTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->mc_reg_table_start = tmp; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, FanTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->fan_table_start = tmp; + + ret = ci_read_smc_sram_dword(rdev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), + &tmp, 
pi->sram_end); + if (ret) + return ret; + + pi->arb_table_start = tmp; + + return 0; +} + +static void ci_read_clock_registers(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + pi->clock_registers.cg_spll_func_cntl = + RREG32_SMC(CG_SPLL_FUNC_CNTL); + pi->clock_registers.cg_spll_func_cntl_2 = + RREG32_SMC(CG_SPLL_FUNC_CNTL_2); + pi->clock_registers.cg_spll_func_cntl_3 = + RREG32_SMC(CG_SPLL_FUNC_CNTL_3); + pi->clock_registers.cg_spll_func_cntl_4 = + RREG32_SMC(CG_SPLL_FUNC_CNTL_4); + pi->clock_registers.cg_spll_spread_spectrum = + RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); + pi->clock_registers.cg_spll_spread_spectrum_2 = + RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2); + pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); + pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); + pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); + pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); + pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); + pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); + pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); + pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); + pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); +} + +static void ci_init_sclk_t(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + pi->low_sclk_interrupt_t = 0; +} + +static void ci_enable_thermal_protection(struct radeon_device *rdev, + bool enable) +{ + u32 tmp = RREG32_SMC(GENERAL_PWRMGT); + + if (enable) + tmp &= ~THERMAL_PROTECTION_DIS; + else + tmp |= THERMAL_PROTECTION_DIS; + WREG32_SMC(GENERAL_PWRMGT, tmp); +} + +static void ci_enable_acpi_power_management(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(GENERAL_PWRMGT); + + tmp |= STATIC_PM_EN; + + WREG32_SMC(GENERAL_PWRMGT, tmp); +} + +#if 0 +static int ci_enter_ulp_state(struct radeon_device *rdev) +{ + + WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); + + udelay(25000); + + return 0; +} + +static int ci_exit_ulp_state(struct radeon_device *rdev) +{ + int i; + + WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); + + udelay(7000); + + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(SMC_RESP_0) == 1) + break; + udelay(1000); + } + + return 0; +} +#endif + +static int ci_notify_smc_display_change(struct radeon_device *rdev, + bool has_display) +{ + PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; + + return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 
0 : -EINVAL; +} + +static int ci_enable_ds_master_switch(struct radeon_device *rdev, + bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (enable) { + if (pi->caps_sclk_ds) { + if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) + return -EINVAL; + } else { + if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) + return -EINVAL; + } + } else { + if (pi->caps_sclk_ds) { + if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) + return -EINVAL; + } + } + + return 0; +} + +static void ci_program_display_gap(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); + u32 pre_vbi_time_in_us; + u32 frame_time_in_us; + u32 ref_clock = rdev->clock.spll.reference_freq; + u32 refresh_rate = r600_dpm_get_vrefresh(rdev); + u32 vblank_time = r600_dpm_get_vblank_time(rdev); + + tmp &= ~DISP_GAP_MASK; + if (rdev->pm.dpm.new_active_crtc_count > 0) + tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + else + tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE); + WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); + + if (refresh_rate == 0) + refresh_rate = 60; + if (vblank_time == 0xffffffff) + vblank_time = 500; + frame_time_in_us = 1000000 / refresh_rate; + pre_vbi_time_in_us = + frame_time_in_us - 200 - vblank_time; + tmp = pre_vbi_time_in_us * (ref_clock / 100); + + WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp); + ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); + ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + + + ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1)); + +} + +static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + + if (enable) { + if (pi->caps_sclk_ss_support) { + tmp = RREG32_SMC(GENERAL_PWRMGT); + tmp |= DYN_SPREAD_SPECTRUM_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); + } + } else { + tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); + tmp &= ~SSEN; + WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp); + + tmp = RREG32_SMC(GENERAL_PWRMGT); + tmp &= ~DYN_SPREAD_SPECTRUM_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); + } +} + +static void ci_program_sstp(struct radeon_device *rdev) +{ + WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); +} + +static void ci_enable_display_gap(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); + + tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); + tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | + DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); + + WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); +} + +static void ci_program_vc(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); + tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); + WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); + + WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0); + WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1); + WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2); + WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3); + WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4); + WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5); + WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6); + WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7); +} + +static void ci_clear_vc(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); + tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT); + WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); + + WREG32_SMC(CG_FTV_0, 0); + WREG32_SMC(CG_FTV_1, 0); + WREG32_SMC(CG_FTV_2, 0); + WREG32_SMC(CG_FTV_3, 0); + WREG32_SMC(CG_FTV_4, 0); + 
WREG32_SMC(CG_FTV_5, 0); + WREG32_SMC(CG_FTV_6, 0); + WREG32_SMC(CG_FTV_7, 0); +} + +static int ci_upload_firmware(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int i, ret; + + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE) + break; + } + WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1); + + ci_stop_smc_clock(rdev); + ci_reset_smc(rdev); + + ret = ci_load_smc_ucode(rdev, pi->sram_end); + + return ret; + +} + +static int ci_get_svi2_voltage_table(struct radeon_device *rdev, + struct radeon_clock_voltage_dependency_table *voltage_dependency_table, + struct atom_voltage_table *voltage_table) +{ + u32 i; + + if (voltage_dependency_table == NULL) + return -EINVAL; + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + + voltage_table->count = voltage_dependency_table->count; + for (i = 0; i < voltage_table->count; i++) { + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +static int ci_construct_voltage_tables(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT, + &pi->vddc_voltage_table); + if (ret) + return ret; + } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &pi->vddc_voltage_table); + if (ret) + return ret; + } + + if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) + si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC, + &pi->vddc_voltage_table); + + if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT, + &pi->vddci_voltage_table); + if (ret) + return ret; + } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(rdev, + &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &pi->vddci_voltage_table); + if (ret) + return ret; + } + + if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) + si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI, + &pi->vddci_voltage_table); + + if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT, + &pi->mvdd_voltage_table); + if (ret) + return ret; + } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(rdev, + &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + &pi->mvdd_voltage_table); + if (ret) + return ret; + } + + if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) + si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD, + &pi->mvdd_voltage_table); + + return 0; +} + +static void ci_populate_smc_voltage_table(struct radeon_device *rdev, + struct atom_voltage_table_entry *voltage_table, + SMU7_Discrete_VoltageLevel *smc_voltage_table) +{ + int ret; + + ret = ci_get_std_voltage_value_sidd(rdev, voltage_table, + &smc_voltage_table->StdVoltageHiSidd, + &smc_voltage_table->StdVoltageLoSidd); + + if (ret) { + smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; + smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; + } + + smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); + 
smc_voltage_table->StdVoltageHiSidd = + cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); + smc_voltage_table->StdVoltageLoSidd = + cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); +} + +static int ci_populate_smc_vddc_table(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + unsigned int count; + + table->VddcLevelCount = pi->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + ci_populate_smc_voltage_table(rdev, + &pi->vddc_voltage_table.entries[count], + &table->VddcLevel[count]); + + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->VddcLevel[count].Smio |= + pi->vddc_voltage_table.entries[count].smio_low; + else + table->VddcLevel[count].Smio = 0; + } + table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); + + return 0; +} + +static int ci_populate_smc_vddci_table(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + unsigned int count; + struct ci_power_info *pi = ci_get_pi(rdev); + + table->VddciLevelCount = pi->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + ci_populate_smc_voltage_table(rdev, + &pi->vddci_voltage_table.entries[count], + &table->VddciLevel[count]); + + if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->VddciLevel[count].Smio |= + pi->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio = 0; + } + table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); + + return 0; +} + +static int ci_populate_smc_mvdd_table(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + unsigned int count; + + table->MvddLevelCount = pi->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + ci_populate_smc_voltage_table(rdev, + &pi->mvdd_voltage_table.entries[count], + &table->MvddLevel[count]); + + if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->MvddLevel[count].Smio |= + pi->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio = 0; + } + table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); + + return 0; +} + +static int ci_populate_smc_voltage_tables(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + int ret; + + ret = ci_populate_smc_vddc_table(rdev, table); + if (ret) + return ret; + + ret = ci_populate_smc_vddci_table(rdev, table); + if (ret) + return ret; + + ret = ci_populate_smc_mvdd_table(rdev, table); + if (ret) + return ret; + + return 0; +} + +static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, + SMU7_Discrete_VoltageLevel *voltage) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 i = 0; + + if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { + for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { + if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { + voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; + break; + } + } + + if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) + return -EINVAL; + } + + return -EINVAL; +} + +static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, + struct atom_voltage_table_entry *voltage_table, + u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) +{ + u16 v_index, idx; + bool voltage_found = false; + *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; + *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; + + if 
(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) + return -EINVAL; + + if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { + for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (voltage_table->value == + rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) + idx = v_index; + else + idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; + *std_voltage_lo_sidd = + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; + *std_voltage_hi_sidd = + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; + break; + } + } + + if (!voltage_found) { + for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (voltage_table->value <= + rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) + idx = v_index; + else + idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; + *std_voltage_lo_sidd = + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; + *std_voltage_hi_sidd = + rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; + break; + } + } + } + } + + return 0; +} + +static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev, + const struct radeon_phase_shedding_limits_table *limits, + u32 sclk, + u32 *phase_shedding) +{ + unsigned int i; + + *phase_shedding = 1; + + for (i = 0; i < limits->count; i++) { + if (sclk < limits->entries[i].sclk) { + *phase_shedding = i; + break; + } + } +} + +static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev, + const struct radeon_phase_shedding_limits_table *limits, + u32 mclk, + u32 *phase_shedding) +{ + unsigned int i; + + *phase_shedding = 1; + + for (i = 0; i < limits->count; i++) { + if (mclk < limits->entries[i].mclk) { + *phase_shedding = i; + break; + } + } +} + +static int ci_init_arb_table_index(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + int ret; + + ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start, + &tmp, pi->sram_end); + if (ret) + return ret; + + tmp &= 0x00FFFFFF; + tmp |= MC_CG_ARB_FREQ_F1 << 24; + + return ci_write_smc_sram_dword(rdev, pi->arb_table_start, + tmp, pi->sram_end); +} + +static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev, + struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table, + u32 clock, u32 *voltage) +{ + u32 i = 0; + + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *voltage = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + *voltage = allowed_clock_voltage_table->entries[i-1].v; + + return 0; +} + +static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev, + u32 sclk, u32 min_sclk_in_sr) +{ + u32 i; + u32 tmp; + u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 
+ min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; + + if (sclk < min) + return 0; + + for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + tmp = sclk / (1 << i); + if (tmp >= min || i == 0) + break; + } + + return (u8)i; +} + +static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) +{ + return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int ci_reset_to_default(struct radeon_device *rdev) +{ + return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); +} + +static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, + u32 sclk, + u32 mclk, + SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + u32 dram_timing; + u32 dram_timing2; + u32 burst_time; + + radeon_atom_set_engine_dram_timings(rdev, sclk, mclk); + + dram_timing = RREG32(MC_ARB_DRAM_TIMING); + dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); + burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; + + arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); + arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); + arb_regs->McArbBurstTime = (u8)burst_time; + + return 0; +} + +static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + SMU7_Discrete_MCArbDramTimingTable arb_regs; + u32 i, j; + int ret = 0; + + memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { + for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { + ret = ci_populate_memory_timing_parameters(rdev, + pi->dpm_table.sclk_table.dpm_levels[i].value, + pi->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (ret) + break; + } + } + + if (ret == 0) + ret = ci_copy_bytes_to_smc(rdev, + pi->arb_table_start, + (u8 *)&arb_regs, + sizeof(SMU7_Discrete_MCArbDramTimingTable), + pi->sram_end); + + return ret; +} + +static int ci_program_memory_timing_parameters(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (pi->need_update_smu7_dpm_table == 0) + return 0; + + return ci_do_program_memory_timing_parameters(rdev); +} + +static void ci_populate_smc_initial_state(struct radeon_device *rdev, + struct radeon_ps *radeon_boot_state) +{ + struct ci_ps *boot_state = ci_get_ps(radeon_boot_state); + struct ci_power_info *pi = ci_get_pi(rdev); + u32 level = 0; + + for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= + boot_state->performance_levels[0].sclk) { + pi->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= + boot_state->performance_levels[0].mclk) { + pi->smc_state_table.MemoryBootLevel = level; + break; + } + } +} + +static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) +{ + u32 i; + u32 mask_value = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + + return mask_value; +} + 
+static void ci_populate_smc_link_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 i; + + for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].DownT = cpu_to_be32(5); + table->LinkLevel[i].UpT = cpu_to_be32(30); + } + + pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; + pi->dpm_level_enable_mask.pcie_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); +} + +static int ci_populate_smc_uvd_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->UvdLevelCount = + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; + table->UvdLevel[count].DclkFrequency = + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; + table->UvdLevel[count].MinVddc = + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->UvdLevel[count].MinVddcPhases = 1; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->UvdLevel[count].VclkFrequency, false, &dividers); + if (ret) + return ret; + + table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->UvdLevel[count].DclkFrequency, false, &dividers); + if (ret) + return ret; + + table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; + + table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); + table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); + table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); + } + + return ret; +} + +static int ci_populate_smc_vce_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->VceLevelCount = + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = + rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; + table->VceLevel[count].MinVoltage = + (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->VceLevel[count].MinPhases = 1; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->VceLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->VceLevel[count].Divider = (u8)dividers.post_divider; + + table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); + table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); + } + + return ret; + +} + +static int ci_populate_smc_acp_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int 
ret = -EINVAL; + + table->AcpLevelCount = (u8) + (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; + table->AcpLevel[count].MinVoltage = + rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; + table->AcpLevel[count].MinPhases = 1; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->AcpLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->AcpLevel[count].Divider = (u8)dividers.post_divider; + + table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); + table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); + } + + return ret; +} + +static int ci_populate_smc_samu_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->SamuLevelCount = + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; + + for (count = 0; count < table->SamuLevelCount; count++) { + table->SamuLevel[count].Frequency = + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; + table->SamuLevel[count].MinVoltage = + rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->SamuLevel[count].MinPhases = 1; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->SamuLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->SamuLevel[count].Divider = (u8)dividers.post_divider; + + table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); + table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); + } + + return ret; +} + +static int ci_calculate_mclk_params(struct radeon_device *rdev, + u32 memory_clock, + SMU7_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dll_state_on) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 dll_cntl = pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; + u32 mpll_ss1 = pi->clock_registers.mpll_ss1; + u32 mpll_ss2 = pi->clock_registers.mpll_ss2; + struct atom_mpll_param mpll_param; + int ret; + + ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param); + if (ret) + return ret; + + mpll_func_cntl &= ~BWCTRL_MASK; + mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); + + mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); + mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | + CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); + + mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; + mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); + + if (pi->mem_gddr5) { + mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); + mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | + YCLK_POST_DIV(mpll_param.post_div); + } + + if (pi->caps_mclk_ss_support) { + struct radeon_atom_ss ss; + u32 freq_nom; + u32 tmp; + u32 reference_clock = rdev->clock.mpll.reference_freq; + + if (pi->mem_gddr5) + 
freq_nom = memory_clock * 4; + else + freq_nom = memory_clock * 2; + + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + if (radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_MEMORY_SS, freq_nom)) { + u32 clks = reference_clock * 5 / ss.rate; + u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); + + mpll_ss1 &= ~CLKV_MASK; + mpll_ss1 |= CLKV(clkv); + + mpll_ss2 &= ~CLKS_MASK; + mpll_ss2 |= CLKS(clks); + } + } + + mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; + mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); + + if (dll_state_on) + mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; + else + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static int ci_populate_single_memory_level(struct radeon_device *rdev, + u32 memory_clock, + SMU7_Discrete_MemoryLevel *memory_level) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + bool dll_state_on; + + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + memory_clock, &memory_level->MinVddc); + if (ret) + return ret; + } + + if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(rdev, + &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + memory_clock, &memory_level->MinVddci); + if (ret) + return ret; + } + + if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(rdev, + &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + memory_clock, &memory_level->MinMvdd); + if (ret) + return ret; + } + + memory_level->MinVddcPhases = 1; + + if (pi->vddc_phase_shed_control) + ci_populate_phase_value_based_on_mclk(rdev, + &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, + memory_clock, + &memory_level->MinVddcPhases); + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + memory_level->UpH = 0; + memory_level->DownH = 100; + memory_level->VoltageDownH = 0; + memory_level->ActivityLevel = (u16)pi->mclk_activity_target; + + memory_level->StutterEnable = false; + memory_level->StrobeEnable = false; + memory_level->EdcReadEnable = false; + memory_level->EdcWriteEnable = false; + memory_level->RttEnable = false; + + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (pi->mclk_stutter_mode_threshold && + (memory_clock <= pi->mclk_stutter_mode_threshold) && + (pi->uvd_enabled == false) && + (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && + (rdev->pm.dpm.new_active_crtc_count <= 2)) + memory_level->StutterEnable = true; + + if (pi->mclk_strobe_mode_threshold && + (memory_clock <= pi->mclk_strobe_mode_threshold)) + memory_level->StrobeEnable = 1; + + if (pi->mem_gddr5) { + memory_level->StrobeRatio = + si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); + if (pi->mclk_edc_enable_threshold && + (memory_clock > pi->mclk_edc_enable_threshold)) + memory_level->EdcReadEnable = true; + + if (pi->mclk_edc_wr_enable_threshold && + (memory_clock > pi->mclk_edc_wr_enable_threshold)) + memory_level->EdcWriteEnable = true; + + if (memory_level->StrobeEnable) { + if 
(si_get_mclk_frequency_ratio(memory_clock, true) >= + ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; + else + dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; + } else { + dll_state_on = pi->dll_default_on; + } + } else { + memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; + } + + ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + if (ret) + return ret; + + memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); + memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); + memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); + + memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); + memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); + memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); + memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); + memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); + memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); + memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); + memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); + memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); + memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); + memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); + + return 0; +} + +static int ci_populate_smc_acpi_level(struct radeon_device *rdev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct atom_clock_dividers dividers; + SMU7_Discrete_VoltageLevel voltage_level; + u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; + u32 dll_cntl = pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; + int ret; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (pi->acpi_vddc) + table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 
0 : 1; + + table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_SCLK, + table->ACPILevel.SclkFrequency, false, &dividers); + if (ret) + return ret; + + table->ACPILevel.SclkDid = (u8)dividers.post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl &= ~SPLL_PWRON; + spll_func_cntl |= SPLL_RESET; + + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= SCLK_MUX_SEL(4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; + table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; + table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; + table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); + table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); + table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); + table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); + table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); + table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); + table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); + table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); + table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); + table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); + table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); + + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { + if (pi->acpi_vddci) + table->MemoryACPILevel.MinVddci = + cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = + cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); + } + + if (ci_populate_mvdd_value(rdev, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = 0; + else + table->MemoryACPILevel.MinMvdd = + cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); + + mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); + + table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); + table->MemoryACPILevel.MpllDqFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); + table->MemoryACPILevel.MpllFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_func_cntl); + table->MemoryACPILevel.MpllFuncCntl_1 = + cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); + table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); + table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + 
table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpH = 0; + table->MemoryACPILevel.DownH = 100; + table->MemoryACPILevel.VoltageDownH = 0; + table->MemoryACPILevel.ActivityLevel = + cpu_to_be16((u16)pi->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = false; + table->MemoryACPILevel.StrobeEnable = false; + table->MemoryACPILevel.EdcReadEnable = false; + table->MemoryACPILevel.EdcWriteEnable = false; + table->MemoryACPILevel.RttEnable = false; + + return 0; +} + + +static int ci_enable_ulv(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ulv_parm *ulv = &pi->ulv; + + if (ulv->supported) { + if (enable) + return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + else + return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + } + + return 0; +} + +static int ci_populate_ulv_level(struct radeon_device *rdev, + SMU7_Discrete_Ulv *state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u16 ulv_voltage = rdev->pm.dpm.backbias_response_time; + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + if (ulv_voltage == 0) { + pi->ulv.supported = false; + return 0; + } + + if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) + state->VddcOffset = 0; + else + state->VddcOffset = + rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; + } else { + if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) + state->VddcOffsetVid = 0; + else + state->VddcOffsetVid = (u8) + ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1; + + state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); + state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); + state->VddcOffset = cpu_to_be16(state->VddcOffset); + + return 0; +} + +static int ci_calculate_sclk_params(struct radeon_device *rdev, + u32 engine_clock, + SMU7_Discrete_GraphicsLevel *sclk) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct atom_clock_dividers dividers; + u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; + u32 reference_clock = rdev->clock.spll.reference_freq; + u32 reference_divider; + u32 fbdiv; + int ret; + + ret = radeon_atom_get_clock_dividers(rdev, + COMPUTE_GPUCLK_INPUT_FLAG_SCLK, + engine_clock, false, &dividers); + if (ret) + return ret; + + reference_divider = 1 + dividers.ref_div; + fbdiv = dividers.fb_div & 0x3FFFFFF; + + spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); + spll_func_cntl_3 |= SPLL_DITHEN; + + if (pi->caps_sclk_ss_support) { + struct radeon_atom_ss ss; + u32 vco_freq = engine_clock * dividers.post_div; + + if (radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { + u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum &= ~CLK_S_MASK; + cg_spll_spread_spectrum |= CLK_S(clk_s); + cg_spll_spread_spectrum |= SSEN; + + cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; + cg_spll_spread_spectrum_2 |= CLK_V(clk_v); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (u8)dividers.post_divider; + + return 0; +} + +static int ci_populate_single_graphic_level(struct radeon_device *rdev, + u32 engine_clock, + u16 sclk_activity_level_t, + SMU7_Discrete_GraphicsLevel *graphic_level) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + + ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level); + if (ret) + return ret; + + ret = ci_get_dependency_volt_by_clk(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + engine_clock, &graphic_level->MinVddc); + if (ret) + return ret; + + graphic_level->SclkFrequency = engine_clock; + + graphic_level->Flags = 0; + graphic_level->MinVddcPhases = 1; + + if (pi->vddc_phase_shed_control) + ci_populate_phase_value_based_on_sclk(rdev, + &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + + graphic_level->ActivityLevel = sclk_activity_level_t; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + graphic_level->EnabledForActivity = 1; + graphic_level->EnabledForThrottle = 1; + graphic_level->UpH = 0; + graphic_level->DownH = 0; + graphic_level->VoltageDownH = 0; + graphic_level->PowerThrottle = 0; + + if (pi->caps_sclk_ds) + graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev, + engine_clock, + CISLAND_MINIMUM_ENGINE_CLOCK); + + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + graphic_level->Flags = cpu_to_be32(graphic_level->Flags); + graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); + graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 
+ graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); + graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); + graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); + graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); + graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); + graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); + graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); + graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); + + return 0; +} + +static int ci_populate_all_graphic_levels(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 level_array_address = pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; + u32 i, ret; + + memset(levels, 0, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + ret = ci_populate_single_graphic_level(rdev, + dpm_table->sclk_table.dpm_levels[i].value, + (u16)pi->activity_target[i], + &pi->smc_state_table.GraphicsLevel[i]); + if (ret) + return ret; + if (i == (dpm_table->sclk_table.count - 1)) + pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + } + + pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; + pi->dpm_level_enable_mask.sclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + ret = ci_copy_bytes_to_smc(rdev, level_array_address, + (u8 *)levels, level_array_size, + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static int ci_populate_ulv_state(struct radeon_device *rdev, + SMU7_Discrete_Ulv *ulv_level) +{ + return ci_populate_ulv_level(rdev, ulv_level); +} + +static int ci_populate_all_memory_levels(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 level_array_address = pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); + u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * + SMU7_MAX_LEVELS_MEMORY; + SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; + u32 i, ret; + + memset(levels, 0, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + if (dpm_table->mclk_table.dpm_levels[i].value == 0) + return -EINVAL; + ret = ci_populate_single_memory_level(rdev, + dpm_table->mclk_table.dpm_levels[i].value, + &pi->smc_state_table.MemoryLevel[i]); + if (ret) + return ret; + } + + pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); + + pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + ret = ci_copy_bytes_to_smc(rdev, level_array_address, + (u8 *)levels, level_array_size, + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static void ci_reset_single_dpm_table(struct radeon_device *rdev, + struct ci_single_dpm_table* dpm_table, + u32 count) +{ + u32 i; + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) + 
dpm_table->dpm_levels[i].enabled = false; +} + +static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, + u32 index, u32 pcie_gen, u32 pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = true; +} + +static int ci_setup_default_pcie_tables(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) + return -EINVAL; + + if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { + pi->pcie_gen_powersaving = pi->pcie_gen_performance; + pi->pcie_lane_powersaving = pi->pcie_lane_performance; + } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { + pi->pcie_gen_performance = pi->pcie_gen_powersaving; + pi->pcie_lane_performance = pi->pcie_lane_powersaving; + } + + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.pcie_speed_table, + SMU7_MAX_LEVELS_LINK); + + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, + pi->pcie_gen_powersaving.min, + pi->pcie_lane_powersaving.min); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, + pi->pcie_gen_performance.min, + pi->pcie_lane_performance.min); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, + pi->pcie_gen_powersaving.min, + pi->pcie_lane_powersaving.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, + pi->pcie_gen_performance.min, + pi->pcie_lane_performance.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, + pi->pcie_gen_powersaving.max, + pi->pcie_lane_powersaving.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, + pi->pcie_gen_performance.max, + pi->pcie_lane_performance.max); + + pi->dpm_table.pcie_speed_table.count = 6; + + return 0; +} + +static int ci_setup_default_dpm_tables(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + struct radeon_clock_voltage_dependency_table *allowed_mclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; + struct radeon_cac_leakage_table *std_voltage_table = + &rdev->pm.dpm.dyn_state.cac_leakage_table; + u32 i; + + if (allowed_sclk_vddc_table == NULL) + return -EINVAL; + if (allowed_sclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_table == NULL) + return -EINVAL; + if (allowed_mclk_table->count < 1) + return -EINVAL; + + memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); + + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.sclk_table, + SMU7_MAX_LEVELS_GRAPHICS); + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.mclk_table, + SMU7_MAX_LEVELS_MEMORY); + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.vddc_table, + SMU7_MAX_LEVELS_VDDC); + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.vddci_table, + SMU7_MAX_LEVELS_VDDCI); + ci_reset_single_dpm_table(rdev, + &pi->dpm_table.mvdd_table, + SMU7_MAX_LEVELS_MVDD); + + pi->dpm_table.sclk_table.count = 0; + for (i = 0; i < allowed_sclk_vddc_table->count; i++) { + if ((i == 0) || + (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != + allowed_sclk_vddc_table->entries[i].clk)) { + pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = + allowed_sclk_vddc_table->entries[i].clk; + pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true; + 
pi->dpm_table.sclk_table.count++; + } + } + + pi->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_mclk_table->count; i++) { + if ((i==0) || + (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != + allowed_mclk_table->entries[i].clk)) { + pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = + allowed_mclk_table->entries[i].clk; + pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true; + pi->dpm_table.mclk_table.count++; + } + } + + for (i = 0; i < allowed_sclk_vddc_table->count; i++) { + pi->dpm_table.vddc_table.dpm_levels[i].value = + allowed_sclk_vddc_table->entries[i].v; + pi->dpm_table.vddc_table.dpm_levels[i].param1 = + std_voltage_table->entries[i].leakage; + pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; + + allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; + if (allowed_mclk_table) { + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.vddci_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.vddci_table.count = allowed_mclk_table->count; + } + + allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; + if (allowed_mclk_table) { + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.mvdd_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; + } + + ci_setup_default_pcie_tables(rdev); + + return 0; +} + +static int ci_find_boot_level(struct ci_single_dpm_table *table, + u32 value, u32 *boot_level) +{ + u32 i; + int ret = -EINVAL; + + for(i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + ret = 0; + } + } + + return ret; +} + +static int ci_init_smc_table(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ulv_parm *ulv = &pi->ulv; + struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; + SMU7_Discrete_DpmTable *table = &pi->smc_state_table; + int ret; + + ret = ci_setup_default_dpm_tables(rdev); + if (ret) + return ret; + + if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) + ci_populate_smc_voltage_tables(rdev, table); + + ci_init_fps_limits(rdev); + + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (pi->mem_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (ulv->supported) { + ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv); + if (ret) + return ret; + WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); + } + + ret = ci_populate_all_graphic_levels(rdev); + if (ret) + return ret; + + ret = ci_populate_all_memory_levels(rdev); + if (ret) + return ret; + + ci_populate_smc_link_level(rdev, table); + + ret = ci_populate_smc_acpi_level(rdev, table); + if (ret) + return ret; + + ret = ci_populate_smc_vce_level(rdev, table); + if (ret) + return ret; + + ret = ci_populate_smc_acp_level(rdev, table); + if (ret) + return ret; + + ret = ci_populate_smc_samu_level(rdev, table); + if (ret) + return ret; + + ret = ci_do_program_memory_timing_parameters(rdev); + if (ret) + return ret; + + ret = ci_populate_smc_uvd_level(rdev, table); + if (ret) 
+ return ret; + + table->UvdBootLevel = 0; + table->VceBootLevel = 0; + table->AcpBootLevel = 0; + table->SamuBootLevel = 0; + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + ret = ci_find_boot_level(&pi->dpm_table.sclk_table, + pi->vbios_boot_state.sclk_bootup_value, + (u32 *)&pi->smc_state_table.GraphicsBootLevel); + + ret = ci_find_boot_level(&pi->dpm_table.mclk_table, + pi->vbios_boot_state.mclk_bootup_value, + (u32 *)&pi->smc_state_table.MemoryBootLevel); + + table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; + table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; + table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; + + ci_populate_smc_initial_state(rdev, radeon_boot_state); + + ret = ci_populate_bapm_parameters_in_dpm_table(rdev); + if (ret) + return ret; + + table->UVDInterval = 1; + table->VCEInterval = 1; + table->ACPInterval = 1; + table->SAMUInterval = 1; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * + CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); + table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * + CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->VddcVddciDelta = 4000; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) + table->SVI2Enable = 1; + else + table->SVI2Enable = 0; + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + table->SystemFlags = cpu_to_be32(table->SystemFlags); + table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); + table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); + table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); + table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); + table->SclkStepSize = cpu_to_be32(table->SclkStepSize); + table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); + table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); + table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); + table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); + table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); + table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); + + ret = ci_copy_bytes_to_smc(rdev, + pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, SystemFlags), + (u8 *)&table->SystemFlags, + sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static void ci_trim_single_dpm_states(struct radeon_device *rdev, + struct ci_single_dpm_table *dpm_table, + u32 low_limit, u32 high_limit) +{ + u32 i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } +} + +static void ci_trim_pcie_dpm_states(struct radeon_device *rdev, + u32 speed_low, u32 lanes_low, + u32 speed_high, u32 lanes_high) +{ + struct ci_power_info 
*pi = ci_get_pi(rdev); + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + u32 i, j; + + for (i = 0; i < pcie_table->count; i++) { + if ((pcie_table->dpm_levels[i].value < speed_low) || + (pcie_table->dpm_levels[i].param1 < lanes_low) || + (pcie_table->dpm_levels[i].value > speed_high) || + (pcie_table->dpm_levels[i].param1 > lanes_high)) + pcie_table->dpm_levels[i].enabled = false; + else + pcie_table->dpm_levels[i].enabled = true; + } + + for (i = 0; i < pcie_table->count; i++) { + if (pcie_table->dpm_levels[i].enabled) { + for (j = i + 1; j < pcie_table->count; j++) { + if (pcie_table->dpm_levels[j].enabled) { + if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && + (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) + pcie_table->dpm_levels[j].enabled = false; + } + } + } + } +} + +static int ci_trim_dpm_states(struct radeon_device *rdev, + struct radeon_ps *radeon_state) +{ + struct ci_ps *state = ci_get_ps(radeon_state); + struct ci_power_info *pi = ci_get_pi(rdev); + u32 high_limit_count; + + if (state->performance_level_count < 1) + return -EINVAL; + + if (state->performance_level_count == 1) + high_limit_count = 0; + else + high_limit_count = 1; + + ci_trim_single_dpm_states(rdev, + &pi->dpm_table.sclk_table, + state->performance_levels[0].sclk, + state->performance_levels[high_limit_count].sclk); + + ci_trim_single_dpm_states(rdev, + &pi->dpm_table.mclk_table, + state->performance_levels[0].mclk, + state->performance_levels[high_limit_count].mclk); + + ci_trim_pcie_dpm_states(rdev, + state->performance_levels[0].pcie_gen, + state->performance_levels[0].pcie_lane, + state->performance_levels[high_limit_count].pcie_gen, + state->performance_levels[high_limit_count].pcie_lane); + + return 0; +} + +static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev) +{ + struct radeon_clock_voltage_dependency_table *disp_voltage_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; + struct radeon_clock_voltage_dependency_table *vddc_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 requested_voltage = 0; + u32 i; + + if (disp_voltage_table == NULL) + return -EINVAL; + if (!disp_voltage_table->count) + return -EINVAL; + + for (i = 0; i < disp_voltage_table->count; i++) { + if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk) + requested_voltage = disp_voltage_table->entries[i].v; + } + + for (i = 0; i < vddc_table->count; i++) { + if (requested_voltage <= vddc_table->entries[i].v) { + requested_voltage = vddc_table->entries[i].v; + return (ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_VddC_Request, + requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; + } + } + + return -EINVAL; +} + +static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result result; + + if (!pi->sclk_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } + + if (!pi->mclk_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } + + if (!pi->pcie_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + result = ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_PCIeDPM_SetEnabledMask, + pi->dpm_level_enable_mask.pcie_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } + + ci_apply_disp_minimum_voltage_request(rdev); + + return 0; +} + +static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, + struct radeon_ps *radeon_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ps *state = ci_get_ps(radeon_state); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; + u32 i; + + pi->need_update_smu7_dpm_table = 0; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) { + pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } else { + /* XXX check display min clock requirements */ + if (0 != CISLAND_MINIMUM_ENGINE_CLOCK) + pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) + pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + if (rdev->pm.dpm.current_active_crtc_count != + rdev->pm.dpm.new_active_crtc_count) + pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; +} + +static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev, + struct radeon_ps *radeon_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ps *state = ci_get_ps(radeon_state); + u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; + u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; + struct ci_dpm_table *dpm_table = &pi->dpm_table; + int ret; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) + dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; + + if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) + dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; + + if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { + ret = ci_populate_all_graphic_levels(rdev); + if (ret) + return ret; + } + + if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { + ret = ci_populate_all_memory_levels(rdev); + if (ret) + return ret; + } + + return 0; +} + +static int 
ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct radeon_clock_and_voltage_limits *max_limits; + int i; + + if (rdev->pm.dpm.ac_power) + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; + + for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; + + if (!pi->caps_uvd_dpm) + break; + } + } + + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + pi->dpm_level_enable_mask.uvd_dpm_enable_mask); + + if (pi->last_mclk_dpm_enable_mask & 0x1) { + pi->uvd_enabled = true; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + } else { + if (pi->last_mclk_dpm_enable_mask & 0x1) { + pi->uvd_enabled = false; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + } + + return (ci_send_msg_to_smc(rdev, enable ? + PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +#if 0 +static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct radeon_clock_and_voltage_limits *max_limits; + int i; + + if (rdev->pm.dpm.ac_power) + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; + for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; + + if (!pi->caps_vce_dpm) + break; + } + } + + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_VCEDPM_SetEnabledMask, + pi->dpm_level_enable_mask.vce_dpm_enable_mask); + } + + return (ci_send_msg_to_smc(rdev, enable ? + PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct radeon_clock_and_voltage_limits *max_limits; + int i; + + if (rdev->pm.dpm.ac_power) + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; + for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; + + if (!pi->caps_samu_dpm) + break; + } + } + + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + pi->dpm_level_enable_mask.samu_dpm_enable_mask); + } + return (ci_send_msg_to_smc(rdev, enable ? + PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; +} + +static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + const struct radeon_clock_and_voltage_limits *max_limits; + int i; + + if (rdev->pm.dpm.ac_power) + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; + for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; + + if (!pi->caps_acp_dpm) + break; + } + } + + ci_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + pi->dpm_level_enable_mask.acp_dpm_enable_mask); + } + + return (ci_send_msg_to_smc(rdev, enable ? + PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} +#endif + +static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + + if (!gate) { + if (pi->caps_uvd_dpm || + (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) + pi->smc_state_table.UvdBootLevel = 0; + else + pi->smc_state_table.UvdBootLevel = + rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; + + tmp = RREG32_SMC(DPM_TABLE_475); + tmp &= ~UvdBootLevel_MASK; + tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel); + WREG32_SMC(DPM_TABLE_475, tmp); + } + + return ci_enable_uvd_dpm(rdev, !gate); +} + +#if 0 +static u8 ci_get_vce_boot_level(struct radeon_device *rdev) +{ + u8 i; + u32 min_evclk = 30000; /* ??? */ + struct radeon_vce_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].evclk >= min_evclk) + return i; + } + + return table->count - 1; +} + +static int ci_update_vce_dpm(struct radeon_device *rdev, + struct radeon_ps *radeon_new_state, + struct radeon_ps *radeon_current_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0); + bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0); + int ret = 0; + u32 tmp; + + if (new_vce_clock_non_zero != old_vce_clock_non_zero) { + if (new_vce_clock_non_zero) { + pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); + + tmp = RREG32_SMC(DPM_TABLE_475); + tmp &= ~VceBootLevel_MASK; + tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); + WREG32_SMC(DPM_TABLE_475, tmp); + + ret = ci_enable_vce_dpm(rdev, true); + } else { + ret = ci_enable_vce_dpm(rdev, false); + } + } + return ret; +} + +static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) +{ + return ci_enable_samu_dpm(rdev, gate); +} + +static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + + if (!gate) { + pi->smc_state_table.AcpBootLevel = 0; + + tmp = RREG32_SMC(DPM_TABLE_475); + tmp &= ~AcpBootLevel_MASK; + tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); + WREG32_SMC(DPM_TABLE_475, tmp); + } + + return ci_enable_acp_dpm(rdev, !gate); +} +#endif + +static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev, + struct radeon_ps *radeon_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + + ret = ci_trim_dpm_states(rdev, radeon_state); + if (ret) + return ret; + 
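+	/*
+	 * Descriptive note (not in the original patch): rebuild the SCLK/MCLK/
+	 * PCIe enable bitmasks from the freshly trimmed DPM tables, where each
+	 * set bit marks a level that survived trimming; MCLK level 0 stays
+	 * masked off while UVD is active, matching ci_enable_uvd_dpm().
+	 */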
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); + pi->dpm_level_enable_mask.mclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); + pi->last_mclk_dpm_enable_mask = + pi->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (pi->uvd_enabled) { + if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) + pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + } + pi->dpm_level_enable_mask.pcie_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); + + return 0; +} + +static int ci_set_mc_special_registers(struct radeon_device *rdev, + struct ci_mc_reg_table *table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u8 i, j, k; + u32 temp_reg; + + for (i = 0, j = table->last; i < table->last; i++) { + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + switch(table->mc_reg_address[i].s1 << 2) { + case MC_SEQ_MISC1: + temp_reg = RREG32(MC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + temp_reg = RREG32(MC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + if (!pi->mem_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + if (!pi->mem_gddr5) { + table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; + table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + } + break; + case MC_SEQ_RESERVE_M: + temp_reg = RREG32(MC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + break; + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) +{ + bool result = true; + + switch(in_reg) { + case MC_SEQ_RAS_TIMING >> 2: + *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; + break; + case MC_SEQ_DLL_STBY >> 2: + *out_reg = MC_SEQ_DLL_STBY_LP >> 2; + break; + case MC_SEQ_G5PDX_CMD0 >> 2: + *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2; + break; + case MC_SEQ_G5PDX_CMD1 >> 2: + *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2; + break; + case MC_SEQ_G5PDX_CTRL >> 2: + *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2; + break; + case MC_SEQ_CAS_TIMING >> 2: + *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; + break; + case MC_SEQ_MISC_TIMING >> 2: + *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; + break; + case MC_SEQ_MISC_TIMING2 >> 2: + *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; + break; + case MC_SEQ_PMG_DVS_CMD >> 2: + *out_reg = 
MC_SEQ_PMG_DVS_CMD_LP >> 2; + break; + case MC_SEQ_PMG_DVS_CTL >> 2: + *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2; + break; + case MC_SEQ_RD_CTL_D0 >> 2: + *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; + break; + case MC_SEQ_RD_CTL_D1 >> 2: + *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; + break; + case MC_SEQ_WR_CTL_D0 >> 2: + *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; + break; + case MC_SEQ_WR_CTL_D1 >> 2: + *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; + break; + case MC_PMG_CMD_EMRS >> 2: + *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; + break; + case MC_PMG_CMD_MRS >> 2: + *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; + break; + case MC_PMG_CMD_MRS1 >> 2: + *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; + break; + case MC_SEQ_PMG_TIMING >> 2: + *out_reg = MC_SEQ_PMG_TIMING_LP >> 2; + break; + case MC_PMG_CMD_MRS2 >> 2: + *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; + break; + case MC_SEQ_WR_CTL_2 >> 2: + *out_reg = MC_SEQ_WR_CTL_2_LP >> 2; + break; + default: + result = false; + break; + } + + return result; +} + +static void ci_set_valid_flag(struct ci_mc_reg_table *table) +{ + u8 i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->valid_flag |= 1 << i; + break; + } + } + } +} + +static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) +{ + u32 i; + u16 address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? + address : table->mc_reg_address[i].s1; + } +} + +static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, + struct ci_mc_reg_table *ci_table) +{ + u8 i, j; + + if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + if (table->num_entries > MAX_AC_TIMING_ENTRIES) + return -EINVAL; + + for (i = 0; i < table->last; i++) + ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ci_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ci_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) + ci_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + ci_table->num_entries = table->num_entries; + + return 0; +} + +static int ci_initialize_mc_reg_table(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct atom_mc_reg_table *table; + struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; + u8 module_index = rv770_get_memory_module_index(rdev); + int ret; + + table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); + WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); + WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY)); + WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0)); + WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1)); + WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL)); + WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD)); + WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL)); + WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); + WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); + WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); + WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); + WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); + WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); + WREG32(MC_SEQ_WR_CTL_D1_LP, 
RREG32(MC_SEQ_WR_CTL_D1)); + WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); + WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); + WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); + WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); + WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); + + ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); + if (ret) + goto init_mc_done; + + ret = ci_copy_vbios_mc_reg_table(table, ci_table); + if (ret) + goto init_mc_done; + + ci_set_s0_mc_reg_index(ci_table); + + ret = ci_set_mc_special_registers(rdev, ci_table); + if (ret) + goto init_mc_done; + + ci_set_valid_flag(ci_table); + +init_mc_done: + kfree(table); + + return ret; +} + +static int ci_populate_mc_reg_addresses(struct radeon_device *rdev, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 i, j; + + for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { + if (pi->mc_reg_table.valid_flag & (1 << j)) { + if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (u8)i; + + return 0; +} + +static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, + SMU7_Discrete_MCRegisterSet *data, + u32 num_entries, u32 valid_flag) +{ + u32 i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & (1 << j)) { + data->value[i] = cpu_to_be32(entry->mc_data[j]); + i++; + } + } +} + +static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev, + const u32 memory_clock, + SMU7_Discrete_MCRegisterSet *mc_reg_table_data) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 i = 0; + + for(i = 0; i < pi->mc_reg_table.num_entries; i++) { + if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) + break; + } + + if ((i == pi->mc_reg_table.num_entries) && (i > 0)) + --i; + + ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, pi->mc_reg_table.last, + pi->mc_reg_table.valid_flag); +} + +static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 i; + + for (i = 0; i < pi->dpm_table.mclk_table.count; i++) + ci_convert_mc_reg_table_entry_to_smc(rdev, + pi->dpm_table.mclk_table.dpm_levels[i].value, + &mc_reg_table->data[i]); +} + +static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + int ret; + + memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); + + ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table); + if (ret) + return ret; + ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); + + return ci_copy_bytes_to_smc(rdev, + pi->mc_reg_table_start, + (u8 *)&pi->smc_mc_reg_table, + sizeof(SMU7_Discrete_MCRegisters), + pi->sram_end); +} + +static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + + if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); + + ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); + + return ci_copy_bytes_to_smc(rdev, + pi->mc_reg_table_start + + offsetof(SMU7_Discrete_MCRegisters, data[0]), + (u8 *)&pi->smc_mc_reg_table.data[0], + 
sizeof(SMU7_Discrete_MCRegisterSet) * + pi->dpm_table.mclk_table.count, + pi->sram_end); +} + +static void ci_enable_voltage_control(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(GENERAL_PWRMGT); + + tmp |= VOLT_PWRMGT_EN; + WREG32_SMC(GENERAL_PWRMGT, tmp); +} + +static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev, + struct radeon_ps *radeon_state) +{ + struct ci_ps *state = ci_get_ps(radeon_state); + int i; + u16 pcie_speed, max_speed = 0; + + for (i = 0; i < state->performance_level_count; i++) { + pcie_speed = state->performance_levels[i].pcie_gen; + if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + + return max_speed; +} + +static u16 ci_get_current_pcie_speed(struct radeon_device *rdev) +{ + u32 speed_cntl = 0; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; + speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; + + return (u16)speed_cntl; +} + +static int ci_get_current_pcie_lane_number(struct radeon_device *rdev) +{ + u32 link_width = 0; + + link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK; + link_width >>= LC_LINK_WIDTH_RD_SHIFT; + + switch (link_width) { + case RADEON_PCIE_LC_LINK_WIDTH_X1: + return 1; + case RADEON_PCIE_LC_LINK_WIDTH_X2: + return 2; + case RADEON_PCIE_LC_LINK_WIDTH_X4: + return 4; + case RADEON_PCIE_LC_LINK_WIDTH_X8: + return 8; + case RADEON_PCIE_LC_LINK_WIDTH_X12: + /* not actually supported */ + return 12; + case RADEON_PCIE_LC_LINK_WIDTH_X0: + case RADEON_PCIE_LC_LINK_WIDTH_X16: + default: + return 16; + } +} + +static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev, + struct radeon_ps *radeon_new_state, + struct radeon_ps *radeon_current_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + enum radeon_pcie_gen target_link_speed = + ci_get_maximum_link_speed(rdev, radeon_new_state); + enum radeon_pcie_gen current_link_speed; + + if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID) + current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state); + else + current_link_speed = pi->force_pcie_gen; + + pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; + pi->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch (target_link_speed) { + case RADEON_PCIE_GEN3: + if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) + break; + pi->force_pcie_gen = RADEON_PCIE_GEN2; + if (current_link_speed == RADEON_PCIE_GEN2) + break; + case RADEON_PCIE_GEN2: + if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) + break; + default: + pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); + break; + } + } else { + if (target_link_speed < current_link_speed) + pi->pspp_notify_required = true; + } +} + +static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev, + struct radeon_ps *radeon_new_state, + struct radeon_ps *radeon_current_state) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + enum radeon_pcie_gen target_link_speed = + ci_get_maximum_link_speed(rdev, radeon_new_state); + u8 request; + + if (pi->pspp_notify_required) { + if (target_link_speed == RADEON_PCIE_GEN3) + request = PCIE_PERF_REQ_PECI_GEN3; + else if (target_link_speed == RADEON_PCIE_GEN2) + request = PCIE_PERF_REQ_PECI_GEN2; + else + request = PCIE_PERF_REQ_PECI_GEN1; + + if ((request == PCIE_PERF_REQ_PECI_GEN1) && + (ci_get_current_pcie_speed(rdev) > 0)) + return; + + radeon_acpi_pcie_performance_request(rdev, request, false); + } +} + +static int 
ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; + struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table = + &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; + + if (allowed_sclk_vddc_table == NULL) + return -EINVAL; + if (allowed_sclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_vddc_table == NULL) + return -EINVAL; + if (allowed_mclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_vddci_table == NULL) + return -EINVAL; + if (allowed_mclk_vddci_table->count < 1) + return -EINVAL; + + pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; + pi->max_vddc_in_pp_table = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; + pi->max_vddci_in_pp_table = + allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = + allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + + return 0; +} + +static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; + u32 leakage_index; + + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + if (leakage_table->leakage_id[leakage_index] == *vddc) { + *vddc = leakage_table->actual_voltage[leakage_index]; + break; + } + } +} + +static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; + u32 leakage_index; + + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + if (leakage_table->leakage_id[leakage_index] == *vddci) { + *vddci = leakage_table->actual_voltage[leakage_index]; + break; + } + } +} + +static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); + } +} + +static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev, + struct radeon_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); + } +} + +static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_vce_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); + } +} + +static void 
ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_uvd_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); + } +} + +static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_phase_shedding_limits_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); + } +} + +static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_clock_and_voltage_limits *table) +{ + if (table) { + ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc); + ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci); + } +} + +static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev, + struct radeon_cac_leakage_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); + } +} + +static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev) +{ + + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); + ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev, + &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk); + ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); + ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); + ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.phase_shedding_limits_table); + ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc); + ci_patch_cac_leakage_table_with_vddc_leakage(rdev, + &rdev->pm.dpm.dyn_state.cac_leakage_table); + +} + +static void ci_get_memory_type(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 tmp; + + tmp = RREG32(MC_SEQ_MISC0); + + if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == + MC_SEQ_MISC0_GDDR5_VALUE) + pi->mem_gddr5 = true; + else + pi->mem_gddr5 = false; + +} + +void ci_update_current_ps(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct ci_ps *new_ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(rdev); + + pi->current_rps = *rps; + pi->current_ps = *new_ps; + pi->current_rps.ps_priv = &pi->current_ps; +} + +void ci_update_requested_ps(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct ci_ps *new_ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(rdev); + + pi->requested_rps = *rps; + pi->requested_ps = *new_ps; + pi->requested_rps.ps_priv = &pi->requested_ps; +} + +int 
ci_dpm_pre_set_power_state(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; + struct radeon_ps *new_ps = &requested_ps; + + ci_update_requested_ps(rdev, new_ps); + + ci_apply_state_adjust_rules(rdev, &pi->requested_rps); + + return 0; +} + +void ci_dpm_post_set_power_state(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps *new_ps = &pi->requested_rps; + + ci_update_current_ps(rdev, new_ps); +} + + +void ci_dpm_setup_asic(struct radeon_device *rdev) +{ + ci_read_clock_registers(rdev); + ci_get_memory_type(rdev); + ci_enable_acpi_power_management(rdev); + ci_init_sclk_t(rdev); +} + +int ci_dpm_enable(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; + int ret; + + if (ci_is_smc_running(rdev)) + return -EINVAL; + if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { + ci_enable_voltage_control(rdev); + ret = ci_construct_voltage_tables(rdev); + if (ret) { + DRM_ERROR("ci_construct_voltage_tables failed\n"); + return ret; + } + } + if (pi->caps_dynamic_ac_timing) { + ret = ci_initialize_mc_reg_table(rdev); + if (ret) + pi->caps_dynamic_ac_timing = false; + } + if (pi->dynamic_ss) + ci_enable_spread_spectrum(rdev, true); + if (pi->thermal_protection) + ci_enable_thermal_protection(rdev, true); + ci_program_sstp(rdev); + ci_enable_display_gap(rdev); + ci_program_vc(rdev); + ret = ci_upload_firmware(rdev); + if (ret) { + DRM_ERROR("ci_upload_firmware failed\n"); + return ret; + } + ret = ci_process_firmware_header(rdev); + if (ret) { + DRM_ERROR("ci_process_firmware_header failed\n"); + return ret; + } + ret = ci_initial_switch_from_arb_f0_to_f1(rdev); + if (ret) { + DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); + return ret; + } + ret = ci_init_smc_table(rdev); + if (ret) { + DRM_ERROR("ci_init_smc_table failed\n"); + return ret; + } + ret = ci_init_arb_table_index(rdev); + if (ret) { + DRM_ERROR("ci_init_arb_table_index failed\n"); + return ret; + } + if (pi->caps_dynamic_ac_timing) { + ret = ci_populate_initial_mc_reg_table(rdev); + if (ret) { + DRM_ERROR("ci_populate_initial_mc_reg_table failed\n"); + return ret; + } + } + ret = ci_populate_pm_base(rdev); + if (ret) { + DRM_ERROR("ci_populate_pm_base failed\n"); + return ret; + } + ci_dpm_start_smc(rdev); + ci_enable_vr_hot_gpio_interrupt(rdev); + ret = ci_notify_smc_display_change(rdev, false); + if (ret) { + DRM_ERROR("ci_notify_smc_display_change failed\n"); + return ret; + } + ci_enable_sclk_control(rdev, true); + ret = ci_enable_ulv(rdev, true); + if (ret) { + DRM_ERROR("ci_enable_ulv failed\n"); + return ret; + } + ret = ci_enable_ds_master_switch(rdev, true); + if (ret) { + DRM_ERROR("ci_enable_ds_master_switch failed\n"); + return ret; + } + ret = ci_start_dpm(rdev); + if (ret) { + DRM_ERROR("ci_start_dpm failed\n"); + return ret; + } + ret = ci_enable_didt(rdev, true); + if (ret) { + DRM_ERROR("ci_enable_didt failed\n"); + return ret; + } + ret = ci_enable_smc_cac(rdev, true); + if (ret) { + DRM_ERROR("ci_enable_smc_cac failed\n"); + return ret; + } + ret = ci_enable_power_containment(rdev, true); + if (ret) { + DRM_ERROR("ci_enable_power_containment failed\n"); + return ret; + } + if (rdev->irq.installed && + r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { +#if 0 + PPSMC_Result result; +#endif + ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if 
(ret) { + DRM_ERROR("ci_set_thermal_temperature_range failed\n"); + return ret; + } + rdev->irq.dpm_thermal = true; + radeon_irq_set(rdev); +#if 0 + result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); + + if (result != PPSMC_Result_OK) + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); +#endif + } + + ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + + ci_update_current_ps(rdev, boot_ps); + + return 0; +} + +void ci_dpm_disable(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; + + if (!ci_is_smc_running(rdev)) + return; + + if (pi->thermal_protection) + ci_enable_thermal_protection(rdev, false); + ci_enable_power_containment(rdev, false); + ci_enable_smc_cac(rdev, false); + ci_enable_didt(rdev, false); + ci_enable_spread_spectrum(rdev, false); + ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + ci_stop_dpm(rdev); + ci_enable_ds_master_switch(rdev, true); + ci_enable_ulv(rdev, false); + ci_clear_vc(rdev); + ci_reset_to_default(rdev); + ci_dpm_stop_smc(rdev); + ci_force_switch_to_arb_f0(rdev); + + ci_update_current_ps(rdev, boot_ps); +} + +int ci_dpm_set_power_state(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps *new_ps = &pi->requested_rps; + struct radeon_ps *old_ps = &pi->current_rps; + int ret; + + ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); + if (pi->pcie_performance_request) + ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); + ret = ci_freeze_sclk_mclk_dpm(rdev); + if (ret) { + DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); + return ret; + } + ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps); + if (ret) { + DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); + return ret; + } + ret = ci_generate_dpm_level_enable_mask(rdev, new_ps); + if (ret) { + DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); + return ret; + } +#if 0 + ret = ci_update_vce_dpm(rdev, new_ps, old_ps); + if (ret) { + DRM_ERROR("ci_update_vce_dpm failed\n"); + return ret; + } +#endif + ret = ci_update_uvd_dpm(rdev, false); + if (ret) { + DRM_ERROR("ci_update_uvd_dpm failed\n"); + return ret; + } + ret = ci_update_sclk_t(rdev); + if (ret) { + DRM_ERROR("ci_update_sclk_t failed\n"); + return ret; + } + if (pi->caps_dynamic_ac_timing) { + ret = ci_update_and_upload_mc_reg_table(rdev); + if (ret) { + DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); + return ret; + } + } + ret = ci_program_memory_timing_parameters(rdev); + if (ret) { + DRM_ERROR("ci_program_memory_timing_parameters failed\n"); + return ret; + } + ret = ci_unfreeze_sclk_mclk_dpm(rdev); + if (ret) { + DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); + return ret; + } + ret = ci_upload_dpm_level_enable_mask(rdev); + if (ret) { + DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); + return ret; + } + if (pi->pcie_performance_request) + ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); + + return 0; +} + +int ci_dpm_power_control_set_level(struct radeon_device *rdev) +{ + return ci_power_control_set_level(rdev); +} + +void ci_dpm_reset_asic(struct radeon_device *rdev) +{ + ci_set_boot_state(rdev); +} + +void ci_dpm_display_configuration_changed(struct radeon_device *rdev) +{ + ci_program_display_gap(rdev); +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; 
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_SI_CLOCK_INFO si; + struct _ATOM_PPLIB_CI_CLOCK_INFO ci; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev, + struct radeon_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) + rdev->pm.dpm.boot_ps = rps; + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + rdev->pm.dpm.uvd_ps = rps; +} + +static void ci_parse_pplib_clock_info(struct radeon_device *rdev, + struct radeon_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ps *ps = ci_get_ps(rps); + struct ci_pl *pl = &ps->performance_levels[index]; + + ps->performance_level_count = index + 1; + + pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); + pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; + pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); + pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; + + pl->pcie_gen = r600_get_pcie_gen_support(rdev, + pi->sys_pcie_mask, + pi->vbios_boot_state.pcie_gen_bootup_value, + clock_info->ci.ucPCIEGen); + pl->pcie_lane = r600_get_pcie_lane_support(rdev, + pi->vbios_boot_state.pcie_lane_bootup_value, + le16_to_cpu(clock_info->ci.usPCIELane)); + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { + pi->acpi_pcie_gen = pl->pcie_gen; + } + + if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { + pi->ulv.supported = true; + pi->ulv.pl = *pl; + pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; + } + + /* patch up boot state */ + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + pl->mclk = pi->vbios_boot_state.mclk_bootup_value; + pl->sclk = pi->vbios_boot_state.sclk_bootup_value; + pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; + pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; + } + + switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + pi->use_pcie_powersaving_levels = true; + if (pi->pcie_gen_powersaving.max < pl->pcie_gen) + pi->pcie_gen_powersaving.max = pl->pcie_gen; + if (pi->pcie_gen_powersaving.min > pl->pcie_gen) + pi->pcie_gen_powersaving.min = pl->pcie_gen; + if (pi->pcie_lane_powersaving.max < pl->pcie_lane) + pi->pcie_lane_powersaving.max = pl->pcie_lane; + if (pi->pcie_lane_powersaving.min > pl->pcie_lane) + pi->pcie_lane_powersaving.min = pl->pcie_lane; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + pi->use_pcie_performance_levels = true; + if (pi->pcie_gen_performance.max < pl->pcie_gen) + pi->pcie_gen_performance.max = pl->pcie_gen; + if (pi->pcie_gen_performance.min > pl->pcie_gen) + pi->pcie_gen_performance.min = pl->pcie_gen; + if 
(pi->pcie_lane_performance.max < pl->pcie_lane) + pi->pcie_lane_performance.max = pl->pcie_lane; + if (pi->pcie_lane_performance.min > pl->pcie_lane) + pi->pcie_lane_performance.min = pl->pcie_lane; + break; + default: + break; + } +} + +static int ci_parse_power_table(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct ci_ps *ps; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * + state_array->ucNumEntries, GFP_KERNEL); + if (!rdev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); + rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); + rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); + for (i = 0; i < state_array->ucNumEntries; i++) { + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + if (!rdev->pm.power_state[i].clock_info) + return -EINVAL; + ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(rdev->pm.dpm.ps); + return -ENOMEM; + } + rdev->pm.dpm.ps[i].ps_priv = ps; + ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + k = 0; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ci_parse_pplib_clock_info(rdev, + &rdev->pm.dpm.ps[i], k, + clock_info); + k++; + } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + rdev->pm.dpm.num_ps = state_array->ucNumEntries; + return 0; +} + +int ci_get_vbios_boot_values(struct radeon_device *rdev, + struct ci_vbios_boot_state *boot_state) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + ATOM_FIRMWARE_INFO_V2_2 *firmware_info; + u8 frev, crev; + u16 data_offset; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, 
&data_offset)) { + firmware_info = + (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios + + data_offset); + boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage); + boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage); + boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage); + boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev); + boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev); + boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock); + boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock); + + return 0; + } + return -EINVAL; +} + +void ci_dpm_fini(struct radeon_device *rdev) +{ + int i; + + for (i = 0; i < rdev->pm.dpm.num_ps; i++) { + kfree(rdev->pm.dpm.ps[i].ps_priv); + } + kfree(rdev->pm.dpm.ps); + kfree(rdev->pm.dpm.priv); + kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); + r600_free_extended_power_table(rdev); +} + +int ci_dpm_init(struct radeon_device *rdev) +{ + int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); + u16 data_offset, size; + u8 frev, crev; + struct ci_power_info *pi; + int ret; + u32 mask; + + pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); + if (pi == NULL) + return -ENOMEM; + rdev->pm.dpm.priv = pi; + + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret) + pi->sys_pcie_mask = 0; + else + pi->sys_pcie_mask = mask; + pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; + + pi->pcie_gen_performance.max = RADEON_PCIE_GEN1; + pi->pcie_gen_performance.min = RADEON_PCIE_GEN3; + pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1; + pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3; + + pi->pcie_lane_performance.max = 0; + pi->pcie_lane_performance.min = 16; + pi->pcie_lane_powersaving.max = 0; + pi->pcie_lane_powersaving.min = 16; + + ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state); + if (ret) { + ci_dpm_fini(rdev); + return ret; + } + ret = ci_parse_power_table(rdev); + if (ret) { + ci_dpm_fini(rdev); + return ret; + } + ret = r600_parse_extended_power_table(rdev); + if (ret) { + ci_dpm_fini(rdev); + return ret; + } + + pi->dll_default_on = false; + pi->sram_end = SMC_RAM_END; + + pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; + + pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; + + pi->sclk_dpm_key_disabled = 0; + pi->mclk_dpm_key_disabled = 0; + pi->pcie_dpm_key_disabled = 0; + + pi->caps_sclk_ds = true; + + pi->mclk_strobe_mode_threshold = 40000; + pi->mclk_stutter_mode_threshold = 40000; + pi->mclk_edc_enable_threshold = 40000; + pi->mclk_edc_wr_enable_threshold = 40000; + + ci_initialize_powertune_defaults(rdev); + + pi->caps_fps = false; + + pi->caps_sclk_throttle_low_notification = false; + + ci_get_leakage_voltages(rdev); + ci_patch_dependency_tables_with_leakage(rdev); + ci_set_private_data_variables_based_on_pptable(rdev); + + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = + kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL); + if 
(!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { + ci_dpm_fini(rdev); + return -ENOMEM; + } + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; + rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; + + rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; + rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; + rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200; + + rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0; + rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; + rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; + rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; + + pi->thermal_temp_setting.temperature_low = 99500; + pi->thermal_temp_setting.temperature_high = 100000; + pi->thermal_temp_setting.temperature_shutdown = 104000; + + pi->uvd_enabled = false; + + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; + pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; + if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { + if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + else + rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; + } + + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { + if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + else + rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; + } + + pi->vddc_phase_shed_control = true; + +#if defined(CONFIG_ACPI) + pi->pcie_performance_request = + radeon_acpi_is_pcie_performance_request_supported(rdev); +#else + pi->pcie_performance_request = false; +#endif + + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + pi->caps_sclk_ss_support = true; + pi->caps_mclk_ss_support = true; + pi->dynamic_ss = true; + } else { + pi->caps_sclk_ss_support = false; + pi->caps_mclk_ss_support = false; + pi->dynamic_ss = true; + } + + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) + pi->thermal_protection = true; + else + pi->thermal_protection = false; + + pi->caps_dynamic_ac_timing = true; + + return 0; +} + +void ci_dpm_print_power_state(struct radeon_device *rdev, + struct radeon_ps *rps) +{ + struct ci_ps *ps = ci_get_ps(rps); + struct ci_pl *pl; + int i; + + r600_dpm_print_class_info(rps->class, 
rps->class2); + r600_dpm_print_cap_info(rps->caps); + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->performance_level_count; i++) { + pl = &ps->performance_levels[i]; + printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", + i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); + } + r600_dpm_print_ps_status(rdev, rps); +} + +u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].sclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; +} + +u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].mclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; +} diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h new file mode 100644 index 000000000000..de504b5ac33f --- /dev/null +++ b/drivers/gpu/drm/radeon/ci_dpm.h @@ -0,0 +1,331 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __CI_DPM_H__ +#define __CI_DPM_H__ + +#include "ppsmc.h" + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 6 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#include "smu7_discrete.h" + +#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2 + +struct ci_pl { + u32 mclk; + u32 sclk; + enum radeon_pcie_gen pcie_gen; + u16 pcie_lane; +}; + +struct ci_ps { + u16 performance_level_count; + bool dc_compatible; + u32 sclk_t; + struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS]; +}; + +struct ci_dpm_level { + bool enabled; + u32 value; + u32 param1; +}; + +#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define CISLAND_MINIMUM_ENGINE_CLOCK 800 + +struct ci_single_dpm_table { + u32 count; + struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct ci_dpm_table { + struct ci_single_dpm_table sclk_table; + struct ci_single_dpm_table mclk_table; + struct ci_single_dpm_table pcie_speed_table; + struct ci_single_dpm_table vddc_table; + struct ci_single_dpm_table vddci_table; + struct ci_single_dpm_table mvdd_table; +}; + +struct ci_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_ulv_parm +{ + bool supported; + u32 cg_ulv_parameter; + u32 volt_change_delay; + struct ci_pl pl; +}; + +#define CISLANDS_MAX_LEAKAGE_COUNT 8 + +struct ci_leakage_voltage { + u16 count; + u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT]; + u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT]; +}; + +struct ci_dpm_level_enable_mask { + u32 uvd_dpm_enable_mask; + u32 vce_dpm_enable_mask; + u32 acp_dpm_enable_mask; + u32 samu_dpm_enable_mask; + u32 sclk_dpm_enable_mask; + u32 mclk_dpm_enable_mask; + u32 pcie_dpm_enable_mask; +}; + +struct ci_vbios_boot_state +{ + u16 mvdd_bootup_value; + u16 vddc_bootup_value; + u16 vddci_bootup_value; + u32 sclk_bootup_value; + u32 mclk_bootup_value; + u16 pcie_gen_bootup_value; + u16 pcie_lane_bootup_value; +}; + +struct ci_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 dll_cntl; + u32 mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_dq_func_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl_1; + u32 mpll_func_cntl_2; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct ci_thermal_temperature_setting { + s32 temperature_low; + s32 temperature_high; + s32 temperature_shutdown; +}; + +struct ci_pcie_perf_range { + u16 max; + u16 min; +}; + +enum ci_pt_config_reg_type { + CISLANDS_CONFIGREG_MMR = 0, + CISLANDS_CONFIGREG_SMC_IND, + CISLANDS_CONFIGREG_DIDT_IND, + CISLANDS_CONFIGREG_CACHE, + CISLANDS_CONFIGREG_MAX +}; + +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct ci_pt_config_reg { + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum ci_pt_config_reg_type type; +}; + +struct ci_pt_defaults { + u8 svi_load_line_en; + u8 svi_load_line_vddc; + u8 tdc_vddc_throttle_release_limit_perc; + u8 tdc_mawt; + u8 tdc_waterfall_ctl; + u8 dte_ambient_temp_base; + u32 display_cac; + u32 bapm_temp_gradient; + u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * 
SMU7_DTE_SINKS]; + u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +struct ci_power_info { + struct ci_dpm_table dpm_table; + u32 voltage_control; + u32 mvdd_control; + u32 vddci_control; + u32 active_auto_throttle_sources; + struct ci_clock_registers clock_registers; + u16 acpi_vddc; + u16 acpi_vddci; + enum radeon_pcie_gen force_pcie_gen; + enum radeon_pcie_gen acpi_pcie_gen; + struct ci_leakage_voltage vddc_leakage; + struct ci_leakage_voltage vddci_leakage; + u16 max_vddc_in_pp_table; + u16 min_vddc_in_pp_table; + u16 max_vddci_in_pp_table; + u16 min_vddci_in_pp_table; + u32 mclk_strobe_mode_threshold; + u32 mclk_stutter_mode_threshold; + u32 mclk_edc_enable_threshold; + u32 mclk_edc_wr_enable_threshold; + struct ci_vbios_boot_state vbios_boot_state; + /* smc offsets */ + u32 sram_end; + u32 dpm_table_start; + u32 soft_regs_start; + u32 mc_reg_table_start; + u32 fan_table_start; + u32 arb_table_start; + /* smc tables */ + SMU7_Discrete_DpmTable smc_state_table; + SMU7_Discrete_MCRegisters smc_mc_reg_table; + SMU7_Discrete_PmFuses smc_powertune_table; + /* other stuff */ + struct ci_mc_reg_table mc_reg_table; + struct atom_voltage_table vddc_voltage_table; + struct atom_voltage_table vddci_voltage_table; + struct atom_voltage_table mvdd_voltage_table; + struct ci_ulv_parm ulv; + u32 power_containment_features; + const struct ci_pt_defaults *powertune_defaults; + u32 dte_tj_offset; + bool vddc_phase_shed_control; + struct ci_thermal_temperature_setting thermal_temp_setting; + struct ci_dpm_level_enable_mask dpm_level_enable_mask; + u32 need_update_smu7_dpm_table; + u32 sclk_dpm_key_disabled; + u32 mclk_dpm_key_disabled; + u32 pcie_dpm_key_disabled; + struct ci_pcie_perf_range pcie_gen_performance; + struct ci_pcie_perf_range pcie_lane_performance; + struct ci_pcie_perf_range pcie_gen_powersaving; + struct ci_pcie_perf_range pcie_lane_powersaving; + u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS]; + u32 mclk_activity_target; + u32 low_sclk_interrupt_t; + u32 last_mclk_dpm_enable_mask; + u32 sys_pcie_mask; + /* caps */ + bool caps_power_containment; + bool caps_cac; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_fps; + bool caps_sclk_ds; + bool caps_sclk_ss_support; + bool caps_mclk_ss_support; + bool caps_uvd_dpm; + bool caps_vce_dpm; + bool caps_samu_dpm; + bool caps_acp_dpm; + bool caps_automatic_dc_transition; + bool caps_sclk_throttle_low_notification; + bool caps_dynamic_ac_timing; + /* flags */ + bool thermal_protection; + bool pcie_performance_request; + bool dynamic_ss; + bool dll_default_on; + bool cac_enabled; + bool uvd_enabled; + bool battery_state; + bool pspp_notify_required; + bool mem_gddr5; + bool enable_bapm_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool use_pcie_performance_levels; + bool use_pcie_powersaving_levels; + /* driver states */ + struct radeon_ps current_rps; + struct ci_ps current_ps; + struct radeon_ps requested_rps; + struct ci_ps requested_ps; +}; + +#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0 +#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2 + +#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256 + +#define CISLANDS_VRC_DFLT0 0x3FFFC000 +#define CISLANDS_VRC_DFLT1 0x000400 +#define CISLANDS_VRC_DFLT2 0xC00080 +#define 
CISLANDS_VRC_DFLT3 0xC00200 +#define CISLANDS_VRC_DFLT4 0xC01680 +#define CISLANDS_VRC_DFLT5 0xC00033 +#define CISLANDS_VRC_DFLT6 0xC00033 +#define CISLANDS_VRC_DFLT7 0x3FFFC000 + +#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035 +#define CISLAND_TARGETACTIVITY_DFLT 30 +#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10 + +#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 +#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 +#define PCIE_PERF_REQ_PECI_GEN1 2 +#define PCIE_PERF_REQ_PECI_GEN2 3 +#define PCIE_PERF_REQ_PECI_GEN3 4 + +int ci_copy_bytes_to_smc(struct radeon_device *rdev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); +void ci_start_smc(struct radeon_device *rdev); +void ci_reset_smc(struct radeon_device *rdev); +int ci_program_jump_on_start(struct radeon_device *rdev); +void ci_stop_smc_clock(struct radeon_device *rdev); +void ci_start_smc_clock(struct radeon_device *rdev); +bool ci_is_smc_running(struct radeon_device *rdev); +PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg); +PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev); +int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit); +int ci_read_smc_sram_dword(struct radeon_device *rdev, + u32 smc_address, u32 *value, u32 limit); +int ci_write_smc_sram_dword(struct radeon_device *rdev, + u32 smc_address, u32 value, u32 limit); + +#endif diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c new file mode 100644 index 000000000000..53b43dd3cf1e --- /dev/null +++ b/drivers/gpu/drm/radeon/ci_smc.c @@ -0,0 +1,262 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include +#include "drmP.h" +#include "radeon.h" +#include "cikd.h" +#include "ppsmc.h" +#include "radeon_ucode.h" + +static int ci_set_smc_sram_address(struct radeon_device *rdev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(SMC_IND_INDEX_0, smc_address); + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + + return 0; +} + +int ci_copy_bytes_to_smc(struct radeon_device *rdev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + u32 data, original_data; + u32 addr; + u32 extra_shift; + int ret; + + if (smc_start_address & 3) + return -EINVAL; + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + ret = ci_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = ci_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(SMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = ci_set_smc_sram_address(rdev, addr, limit); + if (ret) + return ret; + + WREG32(SMC_IND_DATA_0, data); + } + return 0; +} + +void ci_start_smc(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + + tmp &= ~RST_REG; + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +void ci_reset_smc(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + + tmp |= RST_REG; + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +int ci_program_jump_on_start(struct radeon_device *rdev) +{ + static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; + + return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); +} + +void ci_stop_smc_clock(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + tmp |= CK_DISABLE; + + WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +void ci_start_smc_clock(struct radeon_device *rdev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + tmp &= ~CK_DISABLE; + + WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +bool ci_is_smc_running(struct radeon_device *rdev) +{ + u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + u32 pc_c = RREG32_SMC(SMC_PC_C); + + if (!(clk & CK_DISABLE) && (0x20100 <= pc_c)) + return true; + + return false; +} + +PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg) +{ + u32 tmp; + int i; + + if (!ci_is_smc_running(rdev)) + return PPSMC_Result_Failed; + + WREG32(SMC_MESSAGE_0, msg); + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(SMC_RESP_0); + if (tmp != 0) + break; + udelay(1); + } + tmp = RREG32(SMC_RESP_0); + + return (PPSMC_Result)tmp; +} + +PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev) +{ + u32 tmp; + int i; + + if (!ci_is_smc_running(rdev)) + return PPSMC_Result_OK; + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + if ((tmp & CKEN) == 0) + break; + udelay(1); + } + + return PPSMC_Result_OK; +} + +int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) +{ + u32 ucode_start_address; + u32 ucode_size; + const u8 *src; + 
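	/*
	 * Editor's note, not part of the original patch: ci_load_smc_ucode()
	 * streams the SMC firmware image into SMC SRAM.  It bails out if no SMC
	 * firmware was fetched, picks the per-ASIC start address and size (only
	 * Bonaire is handled here), rejects images whose size is not dword
	 * aligned, then programs SMC_IND_INDEX_0 once, enables auto-increment,
	 * and writes the image dword by dword through SMC_IND_DATA_0, packing
	 * the bytes big-endian because the SMC address space is big-endian.
	 */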
u32 data; + + if (!rdev->smc_fw) + return -EINVAL; + + switch (rdev->family) { + case CHIP_BONAIRE: + ucode_start_address = BONAIRE_SMC_UCODE_START; + ucode_size = BONAIRE_SMC_UCODE_SIZE; + break; + default: + DRM_ERROR("unknown asic in smc ucode loader\n"); + BUG(); + } + + if (ucode_size & 3) + return -EINVAL; + + src = (const u8 *)rdev->smc_fw->data; + WREG32(SMC_IND_INDEX_0, ucode_start_address); + WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); + while (ucode_size >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + ucode_size -= 4; + } + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + + return 0; +} + +int ci_read_smc_sram_dword(struct radeon_device *rdev, + u32 smc_address, u32 *value, u32 limit) +{ + int ret; + + ret = ci_set_smc_sram_address(rdev, smc_address, limit); + if (ret) + return ret; + + *value = RREG32(SMC_IND_DATA_0); + return 0; +} + +int ci_write_smc_sram_dword(struct radeon_device *rdev, + u32 smc_address, u32 value, u32 limit) +{ + int ret; + + ret = ci_set_smc_sram_address(rdev, smc_address, limit); + if (ret) + return ret; + + WREG32(SMC_IND_DATA_0, value); + return 0; +} diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 87e5aeed6e88..736a416b51a7 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -40,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); +MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); MODULE_FIRMWARE("radeon/KAVERI_me.bin"); MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); @@ -1545,7 +1546,7 @@ static int cik_init_microcode(struct radeon_device *rdev) const char *chip_name; size_t pfp_req_size, me_req_size, ce_req_size, mec_req_size, rlc_req_size, mc_req_size, - sdma_req_size; + sdma_req_size, smc_req_size; char fw_name[30]; int err; @@ -1561,6 +1562,7 @@ static int cik_init_microcode(struct radeon_device *rdev) rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; mc_req_size = CIK_MC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); break; case CHIP_KAVERI: chip_name = "KAVERI"; @@ -1652,7 +1654,7 @@ static int cik_init_microcode(struct radeon_device *rdev) err = -EINVAL; } - /* No MC ucode on APUs */ + /* No SMC, MC ucode on APUs */ if (!(rdev->flags & RADEON_IS_IGP)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); @@ -1664,6 +1666,21 @@ static int cik_init_microcode(struct radeon_device *rdev) rdev->mc_fw->size, fw_name); err = -EINVAL; } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { + printk(KERN_ERR + "cik_smc: Bogus length %zu in firmware \"%s\"\n", + rdev->smc_fw->size, fw_name); + err = -EINVAL; + } } out: @@ -1682,6 +1699,8 @@ out: rdev->rlc_fw = NULL; release_firmware(rdev->mc_fw); rdev->mc_fw = NULL; + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; } return err; } @@ -6626,8 +6645,12 @@ int cik_irq_set(struct radeon_device *rdev) cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & 
~TIME_STAMP_INT_ENABLE; cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; - thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & - ~(THERM_INTH_MASK | THERM_INTL_MASK); + if (rdev->flags & RADEON_IS_IGP) + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & + ~(THERM_INTH_MASK | THERM_INTL_MASK); + else + thermal_int = RREG32_SMC(CG_THERMAL_INT) & + ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { @@ -6788,7 +6811,10 @@ int cik_irq_set(struct radeon_device *rdev) if (rdev->irq.dpm_thermal) { DRM_DEBUG("dpm thermal\n"); - thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + if (rdev->flags & RADEON_IS_IGP) + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + else + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); @@ -6825,7 +6851,10 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); - WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + if (rdev->flags & RADEON_IS_IGP) + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + else + WREG32_SMC(CG_THERMAL_INT, thermal_int); return 0; } diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 179ca3625ae4..861fb3ec161c 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -36,6 +36,23 @@ #define DIDT_TCP_CTRL0 0x60 /* SMC IND registers */ +#define DPM_TABLE_475 0x3F768 +# define SamuBootLevel(x) ((x) << 0) +# define SamuBootLevel_MASK 0x000000ff +# define SamuBootLevel_SHIFT 0 +# define AcpBootLevel(x) ((x) << 8) +# define AcpBootLevel_MASK 0x0000ff00 +# define AcpBootLevel_SHIFT 8 +# define VceBootLevel(x) ((x) << 16) +# define VceBootLevel_MASK 0x00ff0000 +# define VceBootLevel_SHIFT 16 +# define UvdBootLevel(x) ((x) << 24) +# define UvdBootLevel_MASK 0xff000000 +# define UvdBootLevel_SHIFT 24 + +#define FIRMWARE_FLAGS 0x3F800 +# define INTERRUPTS_ENABLED (1 << 0) + #define NB_DPM_CONFIG_1 0x3F9E8 # define Dpm0PgNbPsLo(x) ((x) << 0) # define Dpm0PgNbPsLo_MASK 0x000000ff @@ -50,25 +67,85 @@ # define DpmXNbPsHi_MASK 0xff000000 # define DpmXNbPsHi_SHIFT 24 +#define SMC_SYSCON_RESET_CNTL 0x80000000 +# define RST_REG (1 << 0) +#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004 +# define CK_DISABLE (1 << 0) +# define CKEN (1 << 24) + +#define SMC_SYSCON_MISC_CNTL 0x80000010 + #define SMC_SYSCON_MSG_ARG_0 0x80000068 +#define SMC_PC_C 0x80000370 + +#define SMC_SCRATCH9 0x80000424 + +#define RCU_UC_EVENTS 0xC0000004 +# define BOOT_SEQ_DONE (1 << 7) + #define GENERAL_PWRMGT 0xC0200000 # define GLOBAL_PWRMGT_EN (1 << 0) +# define STATIC_PM_EN (1 << 1) +# define THERMAL_PROTECTION_DIS (1 << 2) +# define THERMAL_PROTECTION_TYPE (1 << 3) +# define SW_SMIO_INDEX(x) ((x) << 6) +# define SW_SMIO_INDEX_MASK (1 << 6) +# define SW_SMIO_INDEX_SHIFT 6 +# define VOLT_PWRMGT_EN (1 << 10) # define GPU_COUNTER_CLK (1 << 15) +# define DYN_SPREAD_SPECTRUM_EN (1 << 23) + +#define CNB_PWRMGT_CNTL 0xC0200004 +# define GNB_SLOW_MODE(x) ((x) << 0) +# define GNB_SLOW_MODE_MASK (3 << 0) +# define GNB_SLOW_MODE_SHIFT 0 +# define GNB_SLOW (1 << 2) +# define FORCE_NB_PS1 (1 << 3) +# define DPM_ENABLED (1 << 4) #define SCLK_PWRMGT_CNTL 0xC0200008 +# define SCLK_PWRMGT_OFF (1 << 0) # define RESET_BUSY_CNT (1 << 4) # define RESET_SCLK_CNT (1 << 5) # define DYNAMIC_PM_EN (1 << 21) +#define CG_SSP 0xC0200044 +# define SST(x) ((x) << 0) +# define SST_MASK (0xffff << 0) +# define SSTU(x) ((x) << 16) +# define SSTU_MASK (0xf << 16) 
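/*
 * Editor's sketch, not part of the patch: a plausible SMC bring-up sequence
 * built from the helpers and registers introduced in this series.
 * ci_start_smc_clock(), ci_start_smc(), RREG32_SMC(), FIRMWARE_FLAGS and
 * INTERRUPTS_ENABLED all appear above; the helper name, the call ordering and
 * the polling policy shown here are the editor's assumptions.
 */
static int ci_example_wait_for_smc_ready(struct radeon_device *rdev)
{
	int i;

	ci_start_smc_clock(rdev);	/* clear CK_DISABLE */
	ci_start_smc(rdev);		/* release RST_REG */

	/* wait for the SMC firmware to report that it is up */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}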
+ +#define CG_DISPLAY_GAP_CNTL 0xC0200060 +# define DISP_GAP(x) ((x) << 0) +# define DISP_GAP_MASK (3 << 0) +# define VBI_TIMER_COUNT(x) ((x) << 4) +# define VBI_TIMER_COUNT_MASK (0x3fff << 4) +# define VBI_TIMER_UNIT(x) ((x) << 20) +# define VBI_TIMER_UNIT_MASK (7 << 20) +# define DISP_GAP_MCHG(x) ((x) << 24) +# define DISP_GAP_MCHG_MASK (3 << 24) + +#define CG_ULV_PARAMETER 0xC0200158 + #define CG_FTV_0 0xC02001A8 +#define CG_FTV_1 0xC02001AC +#define CG_FTV_2 0xC02001B0 +#define CG_FTV_3 0xC02001B4 +#define CG_FTV_4 0xC02001B8 +#define CG_FTV_5 0xC02001BC +#define CG_FTV_6 0xC02001C0 +#define CG_FTV_7 0xC02001C4 + +#define CG_DISPLAY_GAP_CNTL2 0xC0200230 #define LCAC_SX0_OVR_SEL 0xC0400D04 #define LCAC_SX0_OVR_VAL 0xC0400D08 +#define LCAC_MC0_CNTL 0xC0400D30 #define LCAC_MC0_OVR_SEL 0xC0400D34 #define LCAC_MC0_OVR_VAL 0xC0400D38 - +#define LCAC_MC1_CNTL 0xC0400D3C #define LCAC_MC1_OVR_SEL 0xC0400D40 #define LCAC_MC1_OVR_VAL 0xC0400D44 @@ -78,9 +155,28 @@ #define LCAC_MC3_OVR_SEL 0xC0400D58 #define LCAC_MC3_OVR_VAL 0xC0400D5C +#define LCAC_CPL_CNTL 0xC0400D80 #define LCAC_CPL_OVR_SEL 0xC0400D84 #define LCAC_CPL_OVR_VAL 0xC0400D88 +/* dGPU */ +#define CG_THERMAL_CTRL 0xC0300004 +#define DPM_EVENT_SRC(x) ((x) << 0) +#define DPM_EVENT_SRC_MASK (7 << 0) +#define DIG_THERM_DPM(x) ((x) << 14) +#define DIG_THERM_DPM_MASK 0x003FC000 +#define DIG_THERM_DPM_SHIFT 14 + +#define CG_THERMAL_INT 0xC030000C +#define CI_DIG_THERM_INTH(x) ((x) << 8) +#define CI_DIG_THERM_INTH_MASK 0x0000FF00 +#define CI_DIG_THERM_INTH_SHIFT 8 +#define CI_DIG_THERM_INTL(x) ((x) << 16) +#define CI_DIG_THERM_INTL_MASK 0x00FF0000 +#define CI_DIG_THERM_INTL_SHIFT 16 +#define THERM_INT_MASK_HIGH (1 << 24) +#define THERM_INT_MASK_LOW (1 << 25) + #define CG_MULT_THERMAL_STATUS 0xC0300014 #define ASIC_MAX_TEMP(x) ((x) << 0) #define ASIC_MAX_TEMP_MASK 0x000001ff @@ -89,6 +185,35 @@ #define CTF_TEMP_MASK 0x0003fe00 #define CTF_TEMP_SHIFT 9 +#define CG_SPLL_FUNC_CNTL 0xC0500140 +#define SPLL_RESET (1 << 0) +#define SPLL_PWRON (1 << 1) +#define SPLL_BYPASS_EN (1 << 3) +#define SPLL_REF_DIV(x) ((x) << 5) +#define SPLL_REF_DIV_MASK (0x3f << 5) +#define SPLL_PDIV_A(x) ((x) << 20) +#define SPLL_PDIV_A_MASK (0x7f << 20) +#define SPLL_PDIV_A_SHIFT 20 +#define CG_SPLL_FUNC_CNTL_2 0xC0500144 +#define SCLK_MUX_SEL(x) ((x) << 0) +#define SCLK_MUX_SEL_MASK (0x1ff << 0) +#define CG_SPLL_FUNC_CNTL_3 0xC0500148 +#define SPLL_FB_DIV(x) ((x) << 0) +#define SPLL_FB_DIV_MASK (0x3ffffff << 0) +#define SPLL_FB_DIV_SHIFT 0 +#define SPLL_DITHEN (1 << 28) +#define CG_SPLL_FUNC_CNTL_4 0xC050014C + +#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164 +#define SSEN (1 << 0) +#define CLK_S(x) ((x) << 4) +#define CLK_S_MASK (0xfff << 4) +#define CLK_S_SHIFT 4 +#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168 +#define CLK_V(x) ((x) << 0) +#define CLK_V_MASK (0x3ffffff << 0) +#define CLK_V_SHIFT 0 + #define MPLL_BYPASSCLK_SEL 0xC050019C # define MPLL_CLKOUT_SEL(x) ((x) << 8) # define MPLL_CLKOUT_SEL_MASK 0xFF00 @@ -109,6 +234,7 @@ # define ZCLK_SEL(x) ((x) << 8) # define ZCLK_SEL_MASK 0xFF00 +/* KV/KB */ #define CG_THERMAL_INT_CTRL 0xC2100028 #define DIG_THERM_INTH(x) ((x) << 0) #define DIG_THERM_INTH_MASK 0x000000FF @@ -437,9 +563,37 @@ #define NOOFGROUPS_SHIFT 12 #define NOOFGROUPS_MASK 0x00001000 +#define MC_ARB_DRAM_TIMING 0x2774 +#define MC_ARB_DRAM_TIMING2 0x2778 + +#define MC_ARB_BURST_TIME 0x2808 +#define STATE0(x) ((x) << 0) +#define STATE0_MASK (0x1f << 0) +#define STATE0_SHIFT 0 +#define STATE1(x) ((x) << 5) +#define STATE1_MASK (0x1f << 5) +#define STATE1_SHIFT 5 
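/*
 * Editor's sketch, not part of the patch: how the new dGPU thermal interrupt
 * fields above would typically be programmed.  CG_THERMAL_INT, the
 * CI_DIG_THERM_INTH()/CI_DIG_THERM_INTL() macros and their masks come from
 * this patch; the helper name, and the assumption that the hardware fields
 * take whole degrees C while the dpm code tracks millidegrees, are the
 * editor's.
 */
static void ci_example_set_thermal_range(struct radeon_device *rdev,
					 int low_temp, int high_temp)
{
	u32 tmp = RREG32_SMC(CG_THERMAL_INT);

	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
	       CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);
}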
+#define STATE2(x) ((x) << 10) +#define STATE2_MASK (0x1f << 10) +#define STATE2_SHIFT 10 +#define STATE3(x) ((x) << 15) +#define STATE3_MASK (0x1f << 15) +#define STATE3_SHIFT 15 + +#define MC_SEQ_RAS_TIMING 0x28a0 +#define MC_SEQ_CAS_TIMING 0x28a4 +#define MC_SEQ_MISC_TIMING 0x28a8 +#define MC_SEQ_MISC_TIMING2 0x28ac +#define MC_SEQ_PMG_TIMING 0x28b0 +#define MC_SEQ_RD_CTL_D0 0x28b4 +#define MC_SEQ_RD_CTL_D1 0x28b8 +#define MC_SEQ_WR_CTL_D0 0x28bc +#define MC_SEQ_WR_CTL_D1 0x28c0 + #define MC_SEQ_SUP_CNTL 0x28c8 #define RUN_MASK (1 << 0) #define MC_SEQ_SUP_PGM 0x28cc +#define MC_PMG_AUTO_CMD 0x28d0 #define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8 #define TRAIN_DONE_D0 (1 << 30) @@ -448,9 +602,90 @@ #define MC_IO_PAD_CNTL_D0 0x29d0 #define MEM_FALL_OUT_CMD (1 << 8) +#define MC_SEQ_MISC0 0x2a00 +#define MC_SEQ_MISC0_VEN_ID_SHIFT 8 +#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00 +#define MC_SEQ_MISC0_VEN_ID_VALUE 3 +#define MC_SEQ_MISC0_REV_ID_SHIFT 12 +#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000 +#define MC_SEQ_MISC0_REV_ID_VALUE 1 +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 +#define MC_SEQ_MISC1 0x2a04 +#define MC_SEQ_RESERVE_M 0x2a08 +#define MC_PMG_CMD_EMRS 0x2a0c + #define MC_SEQ_IO_DEBUG_INDEX 0x2a44 #define MC_SEQ_IO_DEBUG_DATA 0x2a48 +#define MC_SEQ_MISC5 0x2a54 +#define MC_SEQ_MISC6 0x2a58 + +#define MC_SEQ_MISC7 0x2a64 + +#define MC_SEQ_RAS_TIMING_LP 0x2a6c +#define MC_SEQ_CAS_TIMING_LP 0x2a70 +#define MC_SEQ_MISC_TIMING_LP 0x2a74 +#define MC_SEQ_MISC_TIMING2_LP 0x2a78 +#define MC_SEQ_WR_CTL_D0_LP 0x2a7c +#define MC_SEQ_WR_CTL_D1_LP 0x2a80 +#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84 +#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88 + +#define MC_PMG_CMD_MRS 0x2aac + +#define MC_SEQ_RD_CTL_D0_LP 0x2b1c +#define MC_SEQ_RD_CTL_D1_LP 0x2b20 + +#define MC_PMG_CMD_MRS1 0x2b44 +#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48 +#define MC_SEQ_PMG_TIMING_LP 0x2b4c + +#define MC_SEQ_WR_CTL_2 0x2b54 +#define MC_SEQ_WR_CTL_2_LP 0x2b58 +#define MC_PMG_CMD_MRS2 0x2b5c +#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60 + +#define MCLK_PWRMGT_CNTL 0x2ba0 +# define DLL_SPEED(x) ((x) << 0) +# define DLL_SPEED_MASK (0x1f << 0) +# define DLL_READY (1 << 6) +# define MC_INT_CNTL (1 << 7) +# define MRDCK0_PDNB (1 << 8) +# define MRDCK1_PDNB (1 << 9) +# define MRDCK0_RESET (1 << 16) +# define MRDCK1_RESET (1 << 17) +# define DLL_READY_READ (1 << 24) +#define DLL_CNTL 0x2ba4 +# define MRDCK0_BYPASS (1 << 24) +# define MRDCK1_BYPASS (1 << 25) + +#define MPLL_FUNC_CNTL 0x2bb4 +#define BWCTRL(x) ((x) << 20) +#define BWCTRL_MASK (0xff << 20) +#define MPLL_FUNC_CNTL_1 0x2bb8 +#define VCO_MODE(x) ((x) << 0) +#define VCO_MODE_MASK (3 << 0) +#define CLKFRAC(x) ((x) << 4) +#define CLKFRAC_MASK (0xfff << 4) +#define CLKF(x) ((x) << 16) +#define CLKF_MASK (0xfff << 16) +#define MPLL_FUNC_CNTL_2 0x2bbc +#define MPLL_AD_FUNC_CNTL 0x2bc0 +#define YCLK_POST_DIV(x) ((x) << 0) +#define YCLK_POST_DIV_MASK (7 << 0) +#define MPLL_DQ_FUNC_CNTL 0x2bc4 +#define YCLK_SEL(x) ((x) << 4) +#define YCLK_SEL_MASK (1 << 4) + +#define MPLL_SS1 0x2bcc +#define CLKV(x) ((x) << 0) +#define CLKV_MASK (0x3ffffff << 0) +#define MPLL_SS2 0x2bd0 +#define CLKS(x) ((x) << 0) +#define CLKS_MASK (0xfff << 0) + #define HDP_HOST_PATH_CNTL 0x2C00 #define CLOCK_GATING_DIS (1 << 23) #define HDP_NONSURFACE_BASE 0x2C04 @@ -465,6 +700,22 @@ #define ATC_MISC_CG 0x3350 +#define MC_SEQ_CNTL_3 0x3600 +# define CAC_EN (1 << 31) +#define MC_SEQ_G5PDX_CTRL 0x3604 +#define MC_SEQ_G5PDX_CTRL_LP 0x3608 +#define MC_SEQ_G5PDX_CMD0 0x360c +#define 
MC_SEQ_G5PDX_CMD0_LP 0x3610 +#define MC_SEQ_G5PDX_CMD1 0x3614 +#define MC_SEQ_G5PDX_CMD1_LP 0x3618 + +#define MC_SEQ_PMG_DVS_CTL 0x3628 +#define MC_SEQ_PMG_DVS_CTL_LP 0x362c +#define MC_SEQ_PMG_DVS_CMD 0x3630 +#define MC_SEQ_PMG_DVS_CMD_LP 0x3634 +#define MC_SEQ_DLL_STBY 0x3638 +#define MC_SEQ_DLL_STBY_LP 0x363c + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_RB_SIZE(x) ((x) << 1) /* log2 */ @@ -492,6 +743,9 @@ # define MC_WR_CLEAN_CNT(x) ((x) << 20) # define MC_VMID(x) ((x) << 25) +#define BIF_LNCNT_RESET 0x5220 +# define RESET_LNCNT_EN (1 << 0) + #define CONFIG_MEMSIZE 0x5428 #define INTERRUPT_CNTL 0x5468 @@ -628,6 +882,9 @@ # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) # define DC_HPDx_EN (1 << 28) +#define DPG_PIPE_STUTTER_CONTROL 0x6cd4 +# define STUTTER_ENABLE (1 << 0) + #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h index 6db6e320bc79..4c1ee6df09a0 100644 --- a/drivers/gpu/drm/radeon/ppsmc.h +++ b/drivers/gpu/drm/radeon/ppsmc.h @@ -99,7 +99,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) -/* KV/KB */ +/* CI/KV/KB */ #define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) #define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) #define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) @@ -108,6 +108,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) #define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) #define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) #define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) #define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) #define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) @@ -116,8 +117,13 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) #define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) #define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) #define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) #define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) #define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) #define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) #define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) @@ -126,9 +132,25 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) #define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) #define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) #define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) #define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) #define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) 
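/*
 * Editor's sketch, not part of the patch: the PPSMC message IDs added above
 * are consumed by handing them to ci_send_msg_to_smc() (defined in ci_smc.c
 * earlier in this series) and checking for PPSMC_Result_OK.  Freezing the
 * MCLK DPM levels around a table update is one typical use; the helper name
 * and the exact call site are assumptions.
 */
static int ci_example_freeze_mclk_dpm(struct radeon_device *rdev, bool freeze)
{
	PPSMC_Msg msg = freeze ? PPSMC_MSG_MCLKDPM_FreezeLevel :
				 PPSMC_MSG_MCLKDPM_UnfreezeLevel;
	PPSMC_Result result = ci_send_msg_to_smc(rdev, msg);

	return (result == PPSMC_Result_OK) ? 0 : -EINVAL;
}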
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) /* TN */ #define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b6bac497f001..930650ec769c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -246,6 +246,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev, u16 *voltage, u16 leakage_idx); +int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev, + u16 *leakage_id); +int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev, + u16 *vddc, u16 *vddci, + u16 virtual_voltage_id, + u16 vbios_voltage_id); int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, u8 voltage_type, u16 nominal_voltage, diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 9c83ecfd0eb7..c633fa53def0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2454,6 +2454,20 @@ static struct radeon_asic ci_asic = { .set_uvd_clocks = &cik_set_uvd_clocks, .get_temperature = &ci_get_temp, }, + .dpm = { + .init = &ci_dpm_init, + .setup_asic = &ci_dpm_setup_asic, + .enable = &ci_dpm_enable, + .disable = &ci_dpm_disable, + .pre_set_power_state = &ci_dpm_pre_set_power_state, + .set_power_state = &ci_dpm_set_power_state, + .post_set_power_state = &ci_dpm_post_set_power_state, + .display_configuration_changed = &ci_dpm_display_configuration_changed, + .fini = &ci_dpm_fini, + .get_sclk = &ci_dpm_get_sclk, + .get_mclk = &ci_dpm_get_mclk, + .print_power_state = &ci_dpm_print_power_state, + }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 68a1a1fb371d..350da1704964 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -750,6 +750,20 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev, int ci_get_temp(struct radeon_device *rdev); int kv_get_temp(struct radeon_device *rdev); +int ci_dpm_init(struct radeon_device *rdev); +int ci_dpm_enable(struct radeon_device *rdev); +void ci_dpm_disable(struct radeon_device *rdev); +int ci_dpm_pre_set_power_state(struct radeon_device *rdev); +int ci_dpm_set_power_state(struct radeon_device *rdev); +void ci_dpm_post_set_power_state(struct radeon_device *rdev); +void ci_dpm_setup_asic(struct radeon_device *rdev); +void ci_dpm_display_configuration_changed(struct radeon_device *rdev); +void ci_dpm_fini(struct radeon_device *rdev); +u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low); +u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low); +void ci_dpm_print_power_state(struct radeon_device *rdev, + struct radeon_ps *ps); + int kv_dpm_init(struct radeon_device *rdev); int kv_dpm_enable(struct radeon_device *rdev); void kv_dpm_disable(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6247b5e2d074..7ba439e9f30f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -211,7 +211,7 @@ void 
radeon_atombios_i2c_init(struct radeon_device *rdev) } static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, - u8 id) + u8 id) { struct atom_context *ctx = rdev->mode_info.atom_context; struct radeon_gpio_rec gpio; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 37d3d343f687..66b04af16949 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1202,6 +1202,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_VERDE: case CHIP_OLAND: case CHIP_HAINAN: + case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: /* DPM requires the RLC, RV770+ dGPU requires SMC */ diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index fad27c051bbf..33858364fe89 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -140,4 +140,7 @@ #define HAINAN_SMC_UCODE_START 0x10000 #define HAINAN_SMC_UCODE_SIZE 0xe67C +#define BONAIRE_SMC_UCODE_START 0x20000 +#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC + #endif diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0f8be48c2ef4..96d96f5df9e7 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3663,7 +3663,7 @@ static void si_clear_vc(struct radeon_device *rdev) WREG32(CG_FTV, 0); } -static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) +u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) { u8 mc_para_index; @@ -3676,7 +3676,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) return mc_para_index; } -static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) +u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) { u8 mc_para_index; @@ -3758,9 +3758,9 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev, return true; } -static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, - u32 max_voltage_steps, - struct atom_voltage_table *voltage_table) +void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, + u32 max_voltage_steps, + struct atom_voltage_table *voltage_table) { unsigned int i, diff; diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h new file mode 100644 index 000000000000..82f70c90a9ee --- /dev/null +++ b/drivers/gpu/drm/radeon/smu7_discrete.h @@ -0,0 +1,486 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU7_DISCRETE_H +#define SMU7_DISCRETE_H + +#include "smu7.h" + +#pragma pack(push, 1) + +#define SMU7_DTE_ITERATIONS 5 +#define SMU7_DTE_SOURCES 3 +#define SMU7_DTE_SINKS 1 +#define SMU7_NUM_CPU_TES 0 +#define SMU7_NUM_GPU_TES 1 +#define SMU7_NUM_NON_TES 2 + +struct SMU7_SoftRegisters +{ + uint32_t RefClockFrequency; + uint32_t PmTimerP; + uint32_t FeatureEnables; + uint32_t PreVBlankGap; + uint32_t VBlankTimeout; + uint32_t TrainTimeGap; + + uint32_t MvddSwitchTime; + uint32_t LongestAcpiTrainTime; + uint32_t AcpiDelay; + uint32_t G5TrainTime; + uint32_t DelayMpllPwron; + uint32_t VoltageChangeTimeout; + uint32_t HandshakeDisables; + + uint8_t DisplayPhy1Config; + uint8_t DisplayPhy2Config; + uint8_t DisplayPhy3Config; + uint8_t DisplayPhy4Config; + + uint8_t DisplayPhy5Config; + uint8_t DisplayPhy6Config; + uint8_t DisplayPhy7Config; + uint8_t DisplayPhy8Config; + + uint32_t AverageGraphicsA; + uint32_t AverageMemoryA; + uint32_t AverageGioA; + + uint8_t SClkDpmEnabledLevels; + uint8_t MClkDpmEnabledLevels; + uint8_t LClkDpmEnabledLevels; + uint8_t PCIeDpmEnabledLevels; + + uint8_t UVDDpmEnabledLevels; + uint8_t SAMUDpmEnabledLevels; + uint8_t ACPDpmEnabledLevels; + uint8_t VCEDpmEnabledLevels; + + uint32_t DRAM_LOG_ADDR_H; + uint32_t DRAM_LOG_ADDR_L; + uint32_t DRAM_LOG_PHY_ADDR_H; + uint32_t DRAM_LOG_PHY_ADDR_L; + uint32_t DRAM_LOG_BUFF_SIZE; + uint32_t UlvEnterC; + uint32_t UlvTime; + uint32_t Reserved[3]; + +}; + +typedef struct SMU7_SoftRegisters SMU7_SoftRegisters; + +struct SMU7_Discrete_VoltageLevel +{ + uint16_t Voltage; + uint16_t StdVoltageHiSidd; + uint16_t StdVoltageLoSidd; + uint8_t Smio; + uint8_t padding; +}; + +typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel; + +struct SMU7_Discrete_GraphicsLevel +{ + uint32_t Flags; + uint32_t MinVddc; + uint32_t MinVddcPhases; + + uint32_t SclkFrequency; + + uint8_t padding1[2]; + uint16_t ActivityLevel; + + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t EnabledForActivity; + uint8_t EnabledForThrottle; + uint8_t UpH; + uint8_t DownH; + uint8_t VoltageDownH; + uint8_t PowerThrottle; + uint8_t DeepSleepDivId; + uint8_t padding[3]; +}; + +typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel; + +struct SMU7_Discrete_ACPILevel +{ + uint32_t Flags; + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t SclkFrequency; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t DeepSleepDivId; + uint8_t padding; + uint32_t CgSpllFuncCntl; + uint32_t CgSpllFuncCntl2; + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; +}; + +typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel; + +struct SMU7_Discrete_Ulv +{ + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint16_t VddcOffset; + uint8_t VddcOffsetVid; + uint8_t VddcPhase; + uint32_t Reserved; +}; + +typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv; + +struct SMU7_Discrete_MemoryLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t MinVddci; + uint32_t 
MinMvdd; + + uint32_t MclkFrequency; + + uint8_t EdcReadEnable; + uint8_t EdcWriteEnable; + uint8_t RttEnable; + uint8_t StutterEnable; + + uint8_t StrobeEnable; + uint8_t StrobeRatio; + uint8_t EnabledForThrottle; + uint8_t EnabledForActivity; + + uint8_t UpH; + uint8_t DownH; + uint8_t VoltageDownH; + uint8_t padding; + + uint16_t ActivityLevel; + uint8_t DisplayWatermark; + uint8_t padding1; + + uint32_t MpllFuncCntl; + uint32_t MpllFuncCntl_1; + uint32_t MpllFuncCntl_2; + uint32_t MpllAdFuncCntl; + uint32_t MpllDqFuncCntl; + uint32_t MclkPwrmgtCntl; + uint32_t DllCntl; + uint32_t MpllSs1; + uint32_t MpllSs2; +}; + +typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel; + +struct SMU7_Discrete_LinkLevel +{ + uint8_t PcieGenSpeed; + uint8_t PcieLaneCount; + uint8_t EnabledForActivity; + uint8_t Padding; + uint32_t DownT; + uint32_t UpT; + uint32_t Reserved; +}; + +typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel; + + +struct SMU7_Discrete_MCArbDramTimingTableEntry +{ + uint32_t McArbDramTiming; + uint32_t McArbDramTiming2; + uint8_t McArbBurstTime; + uint8_t padding[3]; +}; + +typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry; + +struct SMU7_Discrete_MCArbDramTimingTable +{ + SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; +}; + +typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable; + +struct SMU7_Discrete_UvdLevel +{ + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint16_t MinVddc; + uint8_t MinVddcPhases; + uint8_t VclkDivider; + uint8_t DclkDivider; + uint8_t padding[3]; +}; + +typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel; + +struct SMU7_Discrete_ExtClkLevel +{ + uint32_t Frequency; + uint16_t MinVoltage; + uint8_t MinPhases; + uint8_t Divider; +}; + +typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel; + +struct SMU7_Discrete_StateInfo +{ + uint32_t SclkFrequency; + uint32_t MclkFrequency; + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint32_t SamclkFrequency; + uint32_t AclkFrequency; + uint32_t EclkFrequency; + uint16_t MvddVoltage; + uint16_t padding16; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + uint8_t McRegIndex; + uint8_t SeqIndex; + uint8_t SclkDid; + int8_t SclkIndex; + int8_t MclkIndex; + uint8_t PCIeGen; + +}; + +typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo; + + +struct SMU7_Discrete_DpmTable +{ + SMU7_PIDController GraphicsPIDController; + SMU7_PIDController MemoryPIDController; + SMU7_PIDController LinkPIDController; + + uint32_t SystemFlags; + + + uint32_t SmioMaskVddcVid; + uint32_t SmioMaskVddcPhase; + uint32_t SmioMaskVddciVid; + uint32_t SmioMaskMvddVid; + + uint32_t VddcLevelCount; + uint32_t VddciLevelCount; + uint32_t MvddLevelCount; + + SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC]; +// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC]; + SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI]; + SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD]; + + uint8_t GraphicsDpmLevelCount; + uint8_t MemoryDpmLevelCount; + uint8_t LinkLevelCount; + uint8_t UvdLevelCount; + uint8_t VceLevelCount; + uint8_t AcpLevelCount; + uint8_t SamuLevelCount; + uint8_t MasterDeepSleepControl; + uint32_t Reserved[5]; +// uint32_t SamuDefaultLevel; + + SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS]; + SMU7_Discrete_MemoryLevel MemoryACPILevel; + SMU7_Discrete_MemoryLevel MemoryLevel 
[SMU7_MAX_LEVELS_MEMORY]; + SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK]; + SMU7_Discrete_ACPILevel ACPILevel; + SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD]; + SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE]; + SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP]; + SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU]; + SMU7_Discrete_Ulv Ulv; + + uint32_t SclkStepSize; + uint32_t Smio [SMU7_MAX_ENTRIES_SMIO]; + + uint8_t UvdBootLevel; + uint8_t VceBootLevel; + uint8_t AcpBootLevel; + uint8_t SamuBootLevel; + + uint8_t UVDInterval; + uint8_t VCEInterval; + uint8_t ACPInterval; + uint8_t SAMUInterval; + + uint8_t GraphicsBootLevel; + uint8_t GraphicsVoltageChangeEnable; + uint8_t GraphicsThermThrottleEnable; + uint8_t GraphicsInterval; + + uint8_t VoltageInterval; + uint8_t ThermalInterval; + uint16_t TemperatureLimitHigh; + + uint16_t TemperatureLimitLow; + uint8_t MemoryBootLevel; + uint8_t MemoryVoltageChangeEnable; + + uint8_t MemoryInterval; + uint8_t MemoryThermThrottleEnable; + uint16_t VddcVddciDelta; + + uint16_t VoltageResponseTime; + uint16_t PhaseResponseTime; + + uint8_t PCIeBootLinkLevel; + uint8_t PCIeGenInterval; + uint8_t DTEInterval; + uint8_t DTEMode; + + uint8_t SVI2Enable; + uint8_t VRHotGpio; + uint8_t AcDcGpio; + uint8_t ThermGpio; + + uint16_t PPM_PkgPwrLimit; + uint16_t PPM_TemperatureLimit; + + uint16_t DefaultTdp; + uint16_t TargetTdp; + + uint16_t FpsHighT; + uint16_t FpsLowT; + + uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS]; + uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS]; + + uint8_t DTEAmbientTempBase; + uint8_t DTETjOffset; + uint8_t GpuTjMax; + uint8_t GpuTjHyst; + + uint16_t BootVddc; + uint16_t BootVddci; + + uint16_t BootMVdd; + uint16_t padding; + + uint32_t BAPM_TEMP_GRADIENT; + + uint32_t LowSclkInterruptT; +}; + +typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable; + +#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16 +#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY + +struct SMU7_Discrete_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress; + +struct SMU7_Discrete_MCRegisterSet +{ + uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet; + +struct SMU7_Discrete_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; + SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters; + +struct SMU7_Discrete_PmFuses { + // dw0-dw1 + uint8_t BapmVddCVidHiSidd[8]; + + // dw2-dw3 + uint8_t BapmVddCVidLoSidd[8]; + + // dw4-dw5 + uint8_t VddCVid[8]; + + // dw6 + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t SviLoadLineTrimVddC; + uint8_t SviLoadLineOffsetVddC; + + // dw7 + uint16_t TDC_VDDC_PkgLimit; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + + // dw8 + uint8_t TdcWaterfallCtl; + uint8_t LPMLTemperatureMin; + uint8_t LPMLTemperatureMax; + uint8_t Reserved; + + // dw9-dw10 + uint8_t BapmVddCVidHiSidd2[8]; + + // dw11-dw12 + uint32_t Reserved6[2]; + + // dw13-dw16 + uint8_t GnbLPML[16]; + + // dw17 + uint8_t GnbLPMLMaxVid; + uint8_t GnbLPMLMinVid; + uint8_t Reserved1[2]; + + // dw18 + uint16_t BapmVddCBaseLeakageHiSidd; + uint16_t BapmVddCBaseLeakageLoSidd; +}; + +typedef 
struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses; + + +#pragma pack(pop) + +#endif + -- cgit v1.2.3 From 94b4adc5ae30fb451300bdca901ae9771f6baf5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 15 Jul 2013 17:34:33 -0400 Subject: drm/radeon/dpm: add debugfs support for CI This allows you to look at the current DPM state via debugfs. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 11 +++++++++++ drivers/gpu/drm/radeon/cikd.h | 12 ++++++++++++ drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 2 ++ 4 files changed, 26 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 72ab92b60e6e..28b2b36f699d 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -27,6 +27,7 @@ #include "r600_dpm.h" #include "ci_dpm.h" #include "atom.h" +#include #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -4965,6 +4966,16 @@ int ci_dpm_init(struct radeon_device *rdev) return 0; } +void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + u32 sclk = ci_get_average_sclk_freq(rdev); + u32 mclk = ci_get_average_mclk_freq(rdev); + + seq_printf(m, "power level avg sclk: %u mclk: %u\n", + sclk, mclk); +} + void ci_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *rps) { diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 861fb3ec161c..9716ffcded43 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -110,6 +110,14 @@ # define RESET_SCLK_CNT (1 << 5) # define DYNAMIC_PM_EN (1 << 21) +#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014 +# define CURRENT_STATE_MASK (0xf << 4) +# define CURRENT_STATE_SHIFT 4 +# define CURR_MCLK_INDEX_MASK (0xf << 8) +# define CURR_MCLK_INDEX_SHIFT 8 +# define CURR_SCLK_INDEX_MASK (0x1f << 16) +# define CURR_SCLK_INDEX_SHIFT 16 + #define CG_SSP 0xC0200044 # define SST(x) ((x) << 0) # define SST_MASK (0xffff << 0) @@ -126,6 +134,10 @@ # define DISP_GAP_MCHG(x) ((x) << 24) # define DISP_GAP_MCHG_MASK (3 << 24) +#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0 +# define CURR_PCIE_INDEX_MASK (0xf << 24) +# define CURR_PCIE_INDEX_SHIFT 24 + #define CG_ULV_PARAMETER 0xC0200158 #define CG_FTV_0 0xC02001A8 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index c633fa53def0..e28f08bbd4c0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2467,6 +2467,7 @@ static struct radeon_asic ci_asic = { .get_sclk = &ci_dpm_get_sclk, .get_mclk = &ci_dpm_get_mclk, .print_power_state = &ci_dpm_print_power_state, + .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 350da1704964..371a6f2c60ee 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -763,6 +763,8 @@ u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low); void ci_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); +void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); int kv_dpm_init(struct radeon_device *rdev); int kv_dpm_enable(struct radeon_device *rdev); -- cgit v1.2.3 From 89536fd600f9997e776eef6c666cc61ea8c55f07 Mon Sep 17 
00:00:00 2001 From: Alex Deucher Date: Mon, 15 Jul 2013 18:14:24 -0400 Subject: drm/radeon/dpm: implement force performance level for CI Allows you to force the selected performance level via sysfs. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 153 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/ppsmc.h | 1 + drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 2 + 4 files changed, 157 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 28b2b36f699d..9eebf1f7e223 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -3601,6 +3601,153 @@ static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev, return 0; } +static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev, + u32 level_mask) +{ + u32 level = 0; + + while ((level_mask & (1 << level)) == 0) + level++; + + return level; +} + + +int ci_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + PPSMC_Result smc_result; + u32 tmp, levels, i; + int ret; + + if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { + if ((!pi->sclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_sclk(rdev, levels); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & + CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + if ((!pi->mclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_mclk(rdev, levels); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & + CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + if ((!pi->pcie_dpm_key_disabled) && + pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_pcie(rdev, level); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & + CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { + if ((!pi->sclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(rdev, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask); + ret = ci_dpm_force_state_sclk(rdev, levels); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & + CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + if ((!pi->mclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(rdev, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + ret = ci_dpm_force_state_mclk(rdev, levels); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & + 
CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + if ((!pi->pcie_dpm_key_disabled) && + pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(rdev, + pi->dpm_level_enable_mask.pcie_dpm_enable_mask); + ret = ci_dpm_force_state_pcie(rdev, levels); + if (ret) + return ret; + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & + CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { + if (!pi->sclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + if (!pi->mclk_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + if (!pi->pcie_dpm_key_disabled) { + smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + } + + rdev->pm.dpm.forced_level = level; + + return 0; +} + static int ci_set_mc_special_registers(struct radeon_device *rdev, struct ci_mc_reg_table *table) { @@ -4548,6 +4695,12 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) if (pi->pcie_performance_request) ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); + ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); + if (ret) { + DRM_ERROR("ci_dpm_force_performance_level failed\n"); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h index 4c1ee6df09a0..682842804bce 100644 --- a/drivers/gpu/drm/radeon/ppsmc.h +++ b/drivers/gpu/drm/radeon/ppsmc.h @@ -119,6 +119,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) #define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) #define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) #define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) #define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) #define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e28f08bbd4c0..705bc7498ff2 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2468,6 +2468,7 @@ static struct radeon_asic ci_asic = { .get_mclk = &ci_dpm_get_mclk, .print_power_state = &ci_dpm_print_power_state, .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, + .force_performance_level = &ci_dpm_force_performance_level, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 371a6f2c60ee..a88792865fa4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -765,6 +765,8 @@ void ci_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); +int ci_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level); int kv_dpm_init(struct radeon_device *rdev); int kv_dpm_enable(struct radeon_device *rdev); -- cgit v1.2.3 From 5496131e458e6bbee7f7bdf6f116d150c6dcf728 Mon Sep 17 00:00:00 2001 From: Alex Deucher 
Date: Mon, 15 Jul 2013 18:24:31 -0400 Subject: drm/radeon/dpm: implement vblank_too_short callback for CI Check if we can switch the mclk during the vblank time otherwise we may get artifacts on the screen when the mclk changes. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 16 +++++++++++++++- drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 1 + 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 9eebf1f7e223..6e3d387a7388 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -682,6 +682,19 @@ static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) ci_update_uvd_dpm(rdev, gate); } +bool ci_dpm_vblank_too_short(struct radeon_device *rdev) +{ + struct ci_power_info *pi = ci_get_pi(rdev); + u32 vblank_time = r600_dpm_get_vblank_time(rdev); + u32 switch_limit = pi->mem_gddr5 ? 450 : 300; + + if (vblank_time < switch_limit) + return true; + else + return false; + +} + static void ci_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -692,7 +705,8 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, u32 sclk, mclk; int i; - if (rdev->pm.dpm.new_active_crtc_count > 1) + if ((rdev->pm.dpm.new_active_crtc_count > 1) || + ci_dpm_vblank_too_short(rdev)) disable_mclk_switching = true; else disable_mclk_switching = false; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 705bc7498ff2..2d7bdda90bba 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2469,6 +2469,7 @@ static struct radeon_asic ci_asic = { .print_power_state = &ci_dpm_print_power_state, .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, .force_performance_level = &ci_dpm_force_performance_level, + .vblank_too_short = &ci_dpm_vblank_too_short, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index a88792865fa4..b5f4e431c493 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -767,6 +767,7 @@ void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); int ci_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +bool ci_dpm_vblank_too_short(struct radeon_device *rdev); int kv_dpm_init(struct radeon_device *rdev); int kv_dpm_enable(struct radeon_device *rdev); -- cgit v1.2.3 From ae3e40e8712414321ef2b61e8bb26a5d9701643b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 18 Jul 2013 16:39:53 -0400 Subject: drm/radeon/dpm: add debugfs support for KB/KV This allows you to look at the current DPM state via debugfs. 
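As on the other DPM-capable ASICs, the new hook feeds the existing radeon_pm_info debugfs file rather than adding a new one, so the current sclk/vddc level reported by kv_dpm_debugfs_print_current_performance_level() below can be inspected with e.g. cat /sys/kernel/debug/dri/0/radeon_pm_info (the DRM minor number may differ on a given system).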
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cikd.h | 4 ++++ drivers/gpu/drm/radeon/kv_dpm.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 2 ++ 4 files changed, 30 insertions(+) diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 9716ffcded43..259b81c7cdd8 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -134,6 +134,10 @@ # define DISP_GAP_MCHG(x) ((x) << 24) # define DISP_GAP_MCHG_MASK (3 << 24) +#define SMU_VOLTAGE_STATUS 0xC0200094 +# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1) +# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1 + #define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0 # define CURR_PCIE_INDEX_MASK (0xf << 24) # define CURR_PCIE_INDEX_SHIFT 24 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 2e4016356dab..d584ee4a09ad 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -26,6 +26,7 @@ #include "cikd.h" #include "r600_dpm.h" #include "kv_dpm.h" +#include #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 #define KV_MINIMUM_ENGINE_CLOCK 800 @@ -2481,6 +2482,28 @@ int kv_dpm_init(struct radeon_device *rdev) return 0; } +void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 current_index = + (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> + CURR_SCLK_INDEX_SHIFT; + u32 sclk, tmp; + u16 vddc; + + if (current_index >= SMU__NUM_SCLK_DPM_STATE) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); + tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> + SMU_VOLTAGE_CURRENT_LEVEL_SHIFT; + vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp); + seq_printf(m, "power level %d sclk: %u vddc: %u\n", + current_index, sclk, vddc); + } +} + void kv_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *rps) { diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 2d7bdda90bba..b25172bc1ef2 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2640,6 +2640,7 @@ static struct radeon_asic kv_asic = { .get_sclk = &kv_dpm_get_sclk, .get_mclk = &kv_dpm_get_mclk, .print_power_state = &kv_dpm_print_power_state, + .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b5f4e431c493..80ad5d89e4bb 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -782,5 +782,7 @@ u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low); void kv_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); +void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); #endif -- cgit v1.2.3 From 2b4c8022fa95de43f590629b3af611e531e7199a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 18 Jul 2013 16:48:46 -0400 Subject: drm/radeon/dpm: implement force performance level for KB/KV Allows you to force the selected performance level via sysfs. 
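As with the other DPM implementations, the interface is the power_dpm_force_performance_level sysfs attribute, typically /sys/class/drm/card0/device/power_dpm_force_performance_level; writing "low" or "high" forces the lowest/highest enabled level handled by kv_dpm_force_performance_level() below, and "auto" hands control back to the normal DPM logic.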
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/kv_dpm.c | 43 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 2 ++ 3 files changed, 46 insertions(+) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index d584ee4a09ad..c26c4e3005e7 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -40,6 +40,7 @@ static void kv_enable_new_levels(struct radeon_device *rdev); static void kv_program_nbps_index_settings(struct radeon_device *rdev, struct radeon_ps *new_rps); static int kv_set_enabled_levels(struct radeon_device *rdev); +static int kv_force_dpm_highest(struct radeon_device *rdev); static int kv_force_dpm_lowest(struct radeon_device *rdev); static void kv_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *new_rps, @@ -1641,6 +1642,30 @@ static int kv_enable_nb_dpm(struct radeon_device *rdev) return ret; } +int kv_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level) +{ + int ret; + + if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { + ret = kv_force_dpm_highest(rdev); + if (ret) + return ret; + } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { + ret = kv_force_dpm_lowest(rdev); + if (ret) + return ret; + } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { + ret = kv_unforce_levels(rdev); + if (ret) + return ret; + } + + rdev->pm.dpm.forced_level = level; + + return 0; +} + int kv_dpm_pre_set_power_state(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1720,6 +1745,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) kv_enable_nb_dpm(rdev); } } + rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; return 0; } @@ -1796,6 +1822,23 @@ static void kv_construct_boot_state(struct radeon_device *rdev) pi->boot_pl.vce_wm = 0; } +static int kv_force_dpm_highest(struct radeon_device *rdev) +{ + int ret; + u32 enable_mask, i; + + ret = kv_dpm_get_enable_mask(rdev, &enable_mask); + if (ret) + return ret; + + for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) { + if (enable_mask & (1 << i)) + break; + } + + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); +} + static int kv_force_dpm_lowest(struct radeon_device *rdev) { int ret; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index b25172bc1ef2..1dad80c0e17f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2641,6 +2641,7 @@ static struct radeon_asic kv_asic = { .get_mclk = &kv_dpm_get_mclk, .print_power_state = &kv_dpm_print_power_state, .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, + .force_performance_level = &kv_dpm_force_performance_level, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 80ad5d89e4bb..1e386c48ae2d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -784,5 +784,7 @@ void kv_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); +int kv_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level); #endif -- cgit v1.2.3 From 9e9d976205626c3bd92776181cde6a2dda648c2b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2013 18:13:23 -0400 
Subject: drm/radeon/dpm: add new callback for powergating UVD (v4) Starting on CIK, multi-media blocks like UVD no longer have special power state. Rather they have their own DPM implementation which adjusts their clocks dynamically when active. When they are not active, the blocks are powergated to save power. v2: add missing pm locks v3: rebase on uvd state selection rework v4: fix inverted logic typo noticed by Christian Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 2 ++ drivers/gpu/drm/radeon/radeon_pm.c | 43 ++++++++++++++++++++++---------------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 930650ec769c..5e21dbeaf314 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1730,6 +1730,7 @@ struct radeon_asic { void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m); int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool (*vblank_too_short)(struct radeon_device *rdev); + void (*powergate_uvd)(struct radeon_device *rdev, bool gate); } dpm; /* pageflipping */ struct { @@ -2518,6 +2519,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m)) #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) +#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 66b04af16949..d7555369a3e5 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -886,28 +886,35 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) { enum radeon_pm_state_type dpm_state; - if (enable) { + if (rdev->asic->dpm.powergate_uvd) { mutex_lock(&rdev->pm.mutex); - rdev->pm.dpm.uvd_active = true; - if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; - else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; - else - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; - rdev->pm.dpm.state = dpm_state; + /* enable/disable UVD */ + radeon_dpm_powergate_uvd(rdev, !enable); mutex_unlock(&rdev->pm.mutex); } else { - mutex_lock(&rdev->pm.mutex); - rdev->pm.dpm.uvd_active = false; - mutex_unlock(&rdev->pm.mutex); - } + if (enable) { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.uvd_active = true; + if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; + else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; + else + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; + rdev->pm.dpm.state = dpm_state; + 
mutex_unlock(&rdev->pm.mutex); + } else { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.uvd_active = false; + mutex_unlock(&rdev->pm.mutex); + } - radeon_pm_compute_clocks(rdev); + radeon_pm_compute_clocks(rdev); + } } static void radeon_pm_suspend_old(struct radeon_device *rdev) -- cgit v1.2.3 From 5e884f606cdba9c599c9c9373808f272ae794088 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Aug 2013 11:39:38 -0400 Subject: drm/radeon: restructure UVD code to handle UVD PG (v2) When we PG (powergate) UVD, we need to re-initialize it before we can use it again. v2: rebase on UVD stop fixes Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 14 +++---- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/ni.c | 2 +- drivers/gpu/drm/radeon/r600.c | 74 +++++++++++++++++++----------------- drivers/gpu/drm/radeon/radeon_asic.h | 4 +- drivers/gpu/drm/radeon/rv770.c | 2 +- drivers/gpu/drm/radeon/si.c | 2 +- 7 files changed, 50 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 736a416b51a7..59b866aa08d9 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -69,6 +69,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); static void cik_init_pg(struct radeon_device *rdev); static void cik_init_cg(struct radeon_device *rdev); +static void cik_uvd_resume(struct radeon_device *rdev); /* get temperature in millidegrees */ int ci_get_temp(struct radeon_device *rdev) @@ -7619,8 +7620,9 @@ static int cik_startup(struct radeon_device *rdev) return r; } - r = cik_uvd_resume(rdev); + r = radeon_uvd_resume(rdev); if (!r) { + cik_uvd_resume(rdev); r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); if (r) @@ -7708,7 +7710,7 @@ static int cik_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = r600_uvd_init(rdev, true); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } @@ -8598,15 +8600,10 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return r; } -int cik_uvd_resume(struct radeon_device *rdev) +static void cik_uvd_resume(struct radeon_device *rdev) { uint64_t addr; uint32_t size; - int r; - - r = radeon_uvd_resume(rdev); - if (r) - return r; /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; @@ -8632,7 +8629,6 @@ int cik_uvd_resume(struct radeon_device *rdev) addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); - return 0; } static void cik_pcie_gen3_enable(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 2ce12ee3e67f..710c1d4ae5db 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5296,7 +5296,7 @@ static int evergreen_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = r600_uvd_init(rdev, true); if (r) DRM_ERROR("radeon: error initializing UVD (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 21f2eceff2c6..bc298a3500a4 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2230,7 +2230,7 @@ static int cayman_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) - r = 
r600_uvd_init(rdev); + r = r600_uvd_init(rdev, true); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3db2e4ddb2d6..8a600153ef6c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2623,7 +2623,7 @@ void r600_dma_fini(struct radeon_device *rdev) /* * UVD */ -int r600_uvd_rbc_start(struct radeon_device *rdev) +static int r600_uvd_rbc_start(struct radeon_device *rdev, bool ring_test) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; uint64_t rptr_addr; @@ -2664,47 +2664,47 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) rb_bufsz = (0x1 << 8) | rb_bufsz; WREG32(UVD_RBC_RB_CNTL, rb_bufsz); - ring->ready = true; - r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); - if (r) { - ring->ready = false; - return r; - } + if (ring_test) { + ring->ready = true; + r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); + if (r) { + ring->ready = false; + return r; + } - r = radeon_ring_lock(rdev, ring, 10); - if (r) { - DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); - return r; - } + r = radeon_ring_lock(rdev, ring, 10); + if (r) { + DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); + return r; + } - tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); + tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); - tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); + tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); - tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); + tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); - /* Clear timeout status bits */ - radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); - radeon_ring_write(ring, 0x8); + /* Clear timeout status bits */ + radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); + radeon_ring_write(ring, 0x8); - radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); - radeon_ring_write(ring, 3); + radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); + radeon_ring_write(ring, 3); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring); + } return 0; } -void r600_uvd_stop(struct radeon_device *rdev) +void r600_do_uvd_stop(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - /* force RBC into idle state */ WREG32(UVD_RBC_RB_CNTL, 0x11010101); @@ -2723,11 +2723,17 @@ void r600_uvd_stop(struct radeon_device *rdev) /* Unstall UMC and register bus */ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); +} +void r600_uvd_stop(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + + r600_do_uvd_stop(rdev); ring->ready = false; } -int r600_uvd_init(struct radeon_device *rdev) +int r600_uvd_init(struct radeon_device *rdev, bool ring_test) { int i, j, r; /* disable byte swapping */ @@ -2815,17 +2821,17 @@ int r600_uvd_init(struct radeon_device *rdev) if (r) { DRM_ERROR("UVD not responding, giving up!!!\n"); - radeon_set_uvd_clocks(rdev, 0, 0); - return r; + goto done; } /* enable interupt */ 
WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1)); - r = r600_uvd_rbc_start(rdev); + r = r600_uvd_rbc_start(rdev, ring_test); if (!r) DRM_INFO("UVD initialized successfully.\n"); +done: /* lower clocks again */ radeon_set_uvd_clocks(rdev, 0, 0); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 1e386c48ae2d..3570817a5847 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -424,8 +424,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde struct seq_file *m); /* uvd */ -int r600_uvd_init(struct radeon_device *rdev); -int r600_uvd_rbc_start(struct radeon_device *rdev); +int r600_uvd_init(struct radeon_device *rdev, bool ring_test); void r600_uvd_stop(struct radeon_device *rdev); int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_uvd_fence_emit(struct radeon_device *rdev, @@ -696,7 +695,6 @@ u32 cik_get_xclk(struct radeon_device *rdev); uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg); void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); -int cik_uvd_resume(struct radeon_device *rdev); void cik_sdma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 95590bd07afb..52253b2ab0d5 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1928,7 +1928,7 @@ static int rv770_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = r600_uvd_init(rdev, true); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 4f91e1f4d814..da23ce8f4388 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6421,7 +6421,7 @@ static int si_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = r600_uvd_init(rdev, true); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } -- cgit v1.2.3 From 77df508a98834d8e2fe4c7c4e1089a1ce66ccaa1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 9 Aug 2013 10:02:40 -0400 Subject: drm/radeon/dpm: implement UVD powergating for KB/KV Powergate the UVD block when not in use to save power. 
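For reference, the gating path boils down to the following ordering, condensed from kv_dpm_powergate_uvd() in the hunk below (the pi->uvd_power_gated bookkeeping and early-out are omitted here):

    if (gate) {
            r600_do_uvd_stop(rdev);                          /* quiesce the UVD block first */
            cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); /* drop UVD clockgating */
            kv_update_uvd_dpm(rdev, true);                   /* stop UVD DPM */
            if (pi->caps_uvd_pg)
                    kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
    } else {
            if (pi->caps_uvd_pg)
                    kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
            cik_uvd_resume(rdev);                            /* reprogram the VCPU memory ranges */
            r600_uvd_init(rdev, false);                      /* re-init, skipping the ring test */
            cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
            kv_update_uvd_dpm(rdev, false);
    }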
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 4 ++-- drivers/gpu/drm/radeon/kv_dpm.c | 22 +++++++++++++++------- drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 1 + 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 59b866aa08d9..e661aec734b2 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -69,7 +69,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); static void cik_init_pg(struct radeon_device *rdev); static void cik_init_cg(struct radeon_device *rdev); -static void cik_uvd_resume(struct radeon_device *rdev); +void cik_uvd_resume(struct radeon_device *rdev); /* get temperature in millidegrees */ int ci_get_temp(struct radeon_device *rdev) @@ -8600,7 +8600,7 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return r; } -static void cik_uvd_resume(struct radeon_device *rdev) +void cik_uvd_resume(struct radeon_device *rdev) { uint64_t addr; uint32_t size; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index c26c4e3005e7..3e232a4d3f4c 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -49,7 +49,7 @@ static int kv_set_thermal_temperature_range(struct radeon_device *rdev, int min_temp, int max_temp); static int kv_init_fps_limits(struct radeon_device *rdev); -static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); +void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate); static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate); static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate); @@ -59,6 +59,10 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); extern void cik_update_cg(struct radeon_device *rdev, u32 block, bool enable); +extern void cik_uvd_resume(struct radeon_device *rdev); +extern int r600_uvd_init(struct radeon_device *rdev, bool ring_test); +extern void r600_do_uvd_stop(struct radeon_device *rdev); + static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -1201,6 +1205,7 @@ int kv_dpm_enable(struct radeon_device *rdev) kv_dpm_powergate_acp(rdev, true); kv_dpm_powergate_samu(rdev, true); kv_dpm_powergate_vce(rdev, true); + kv_dpm_powergate_uvd(rdev, true); kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); @@ -1458,7 +1463,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) return kv_enable_acp_dpm(rdev, !gate); } -static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) +void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1468,13 +1473,18 @@ static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) pi->uvd_power_gated = gate; if (gate) { - kv_update_uvd_dpm(rdev, true); + r600_do_uvd_stop(rdev); + cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); + kv_update_uvd_dpm(rdev, gate); if (pi->caps_uvd_pg) kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF); } else { if (pi->caps_uvd_pg) kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON); - kv_update_uvd_dpm(rdev, false); + cik_uvd_resume(rdev); + r600_uvd_init(rdev, false); + cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); + kv_update_uvd_dpm(rdev, gate); } } @@ -1714,7 +1724,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) 
return ret; } #endif - kv_update_uvd_dpm(rdev, false); kv_update_sclk_t(rdev); } } else { @@ -1740,7 +1749,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) return ret; } #endif - kv_update_uvd_dpm(rdev, false); kv_update_sclk_t(rdev); kv_enable_nb_dpm(rdev); } @@ -2502,7 +2510,7 @@ int kv_dpm_init(struct radeon_device *rdev) pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? */ - pi->caps_uvd_pg = false; /* XXX */ + pi->caps_uvd_pg = true; pi->caps_uvd_dpm = true; pi->caps_vce_pg = false; pi->caps_samu_pg = false; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 1dad80c0e17f..63b6aae66236 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2642,6 +2642,7 @@ static struct radeon_asic kv_asic = { .print_power_state = &kv_dpm_print_power_state, .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, .force_performance_level = &kv_dpm_force_performance_level, + .powergate_uvd = &kv_dpm_powergate_uvd, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3570817a5847..9060757e4dc1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -784,5 +784,6 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); int kv_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); #endif -- cgit v1.2.3 From 942bdf7f9ebc9a46e3f9b3c235112c0947905453 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 9 Aug 2013 10:05:24 -0400 Subject: drm/radeon/dpm: implement UVD powergating for CI Disable the UVD block when not in use to save power. The block is not actually powergated on CI, but we switch between UVD DPM (where the uvd clocks are adjusted on demand) and clocks off. 
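On CI the new powergate_uvd callback is therefore just a thin wrapper: ci_dpm_powergate_uvd() calls ci_update_uvd_dpm(rdev, gate), and the unconditional ci_update_uvd_dpm(rdev, false) call in ci_dpm_set_power_state() is removed so UVD DPM follows the gate state set by the callback instead of being re-enabled on every state change.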
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 7 +------ drivers/gpu/drm/radeon/radeon_asic.c | 1 + drivers/gpu/drm/radeon/radeon_asic.h | 1 + 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 6e3d387a7388..e4d9d50ce908 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -677,7 +677,7 @@ static int ci_power_control_set_level(struct radeon_device *rdev) return ret; } -static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) +void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) { ci_update_uvd_dpm(rdev, gate); } @@ -4674,11 +4674,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) return ret; } #endif - ret = ci_update_uvd_dpm(rdev, false); - if (ret) { - DRM_ERROR("ci_update_uvd_dpm failed\n"); - return ret; - } ret = ci_update_sclk_t(rdev); if (ret) { DRM_ERROR("ci_update_sclk_t failed\n"); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 63b6aae66236..61c06449b31a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2470,6 +2470,7 @@ static struct radeon_asic ci_asic = { .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, .force_performance_level = &ci_dpm_force_performance_level, .vblank_too_short = &ci_dpm_vblank_too_short, + .powergate_uvd = &ci_dpm_powergate_uvd, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 9060757e4dc1..5630291c4b06 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -766,6 +766,7 @@ void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, int ci_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool ci_dpm_vblank_too_short(struct radeon_device *rdev); +void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); int kv_dpm_init(struct radeon_device *rdev); int kv_dpm_enable(struct radeon_device *rdev); -- cgit v1.2.3 From f75195cac32bfd2ef07764bd370d3b788bd8b003 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 12 Aug 2013 11:24:05 -0400 Subject: drm/radeon/dpm: add reclocking quirk for ASUS K70AF The LCD has a relatively short vblank time (216us), but the card is able to reclock memory fine in that time. 
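The quirk below keys off the PCI IDs of that machine (device 0x9553 with ASUS subsystem 0x1043:0x1c42) and lowers the mclk switch limit from the usual 300us to 200us, so the 216us vblank is no longer treated as too short and memory reclocking during display stays enabled; the subsystem IDs of other machines can be read with e.g. lspci -nn should similar quirks become necessary.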
Signed-off-by: Alex Deucher Reported-by: normalrawr@gmail.com --- drivers/gpu/drm/radeon/rv770_dpm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 094c67a29d0d..44c1e782a696 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2517,8 +2517,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) { u32 vblank_time = r600_dpm_get_vblank_time(rdev); + u32 switch_limit = 300; - if (vblank_time < 300) + /* quirks */ + /* ASUS K70AF */ + if ((rdev->pdev->device == 0x9553) && + (rdev->pdev->subsystem_vendor == 0x1043) && + (rdev->pdev->subsystem_device == 0x1c42)) + switch_limit = 200; + + if (vblank_time < switch_limit) return true; else return false; -- cgit v1.2.3 From 1bd4cff651350380f9fb6847313cb78e84c03846 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 12 Aug 2013 11:35:02 -0400 Subject: drm/radeon/dpm: adjust the vblank time checks for eg, ni, si According to the internal teams, we never hit the limit for mclk switching on these asics, so we can disable the check. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cypress_dpm.c | 3 ++- drivers/gpu/drm/radeon/ni_dpm.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 7e5d0b570a30..95a66db08d9b 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -2166,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); u32 vblank_time = r600_dpm_get_vblank_time(rdev); - u32 switch_limit = pi->mem_gddr5 ? 450 : 300; + /* we never hit the non-gddr5 limit so disable it */ + u32 switch_limit = pi->mem_gddr5 ? 450 : 0; if (vblank_time < switch_limit) return true; diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index f0f5f748938a..238078c2b319 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); u32 vblank_time = r600_dpm_get_vblank_time(rdev); - u32 switch_limit = pi->mem_gddr5 ? 450 : 300; + /* we never hit the non-gddr5 limit so disable it */ + u32 switch_limit = pi->mem_gddr5 ? 450 : 0; if (vblank_time < switch_limit) return true; -- cgit v1.2.3 From 4543eda52113d1e2cc0e9bf416f79597e6ef1ec7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 7 Aug 2013 19:34:53 -0400 Subject: drm/radeon: fix endian bugs in hw i2c atom routines Need to swap the data fetched over i2c properly. This is the same fix as the endian fix for aux channel transactions. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/atombios_dp.c | 6 +++--- drivers/gpu/drm/radeon/atombios_i2c.c | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 32501f6ec991..16023986d301 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -50,7 +50,7 @@ static char *pre_emph_names[] = { * or from atom. Note that atom operates on * dw units. 
*/ -static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) +void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ @@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); - radeon_copy_swap(base, send, send_bytes, true); + radeon_atom_copy_swap(base, send, send_bytes, true); args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); @@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, recv_bytes = recv_size; if (recv && recv_size) - radeon_copy_swap(recv, base + 16, recv_bytes, false); + radeon_atom_copy_swap(recv, base + 16, recv_bytes, false); return recv_bytes; } diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 082338df708a..2ca389d19258 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -27,6 +27,8 @@ #include "radeon.h" #include "atom.h" +extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); + #define TARGET_HW_I2C_CLOCK 50 /* these are a limitation of ProcessI2cChannelTransaction not the hw */ @@ -77,7 +79,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, } if (!(flags & HW_I2C_WRITE)) - memcpy(buf, base, num); + radeon_atom_copy_swap(buf, base, num, false); return 0; } -- cgit v1.2.3 From 76a0df859defc53e6cb61f698a48ac7da92c8d84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Aug 2013 11:56:50 +0200 Subject: drm/radeon: rework ring function handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Give the ring functions a separate structure and let the asic structure point to the ring specific functions. This simplifies the code and allows us to make changes at only one point. No change in functionality. Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 72 ++- drivers/gpu/drm/radeon/radeon_asic.c | 1026 +++++++++------------------------- drivers/gpu/drm/radeon/radeon_cs.c | 2 +- 3 files changed, 313 insertions(+), 787 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5e21dbeaf314..b26a20fe2859 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1578,6 +1578,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, unsigned nfiles); int radeon_debugfs_fence_init(struct radeon_device *rdev); +/* + * ASIC ring specific functions. 
+ */ +struct radeon_asic_ring { + /* ring read/write ptr handling */ + u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring); + u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring); + void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring); + + /* validating and patching of IBs */ + int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); + int (*cs_parse)(struct radeon_cs_parser *p); + + /* command emmit functions */ + void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); + void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); + void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, + struct radeon_semaphore *semaphore, bool emit_wait); + void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); + + /* testing functions */ + int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); + int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); + bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); + + /* deprecated */ + void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); +}; /* * ASIC specific functions. @@ -1621,23 +1649,7 @@ struct radeon_asic { uint32_t incr, uint32_t flags); } vm; /* ring specific callbacks */ - struct { - void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); - int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); - void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); - void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, - struct radeon_semaphore *semaphore, bool emit_wait); - int (*cs_parse)(struct radeon_cs_parser *p); - void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); - int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); - int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); - bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); - void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); - - u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring); - u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring); - void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring); - } ring[RADEON_NUM_RINGS]; + struct radeon_asic_ring *ring[RADEON_NUM_RINGS]; /* irqs */ struct { int (*set)(struct radeon_device *rdev); @@ -2442,7 +2454,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_fini(rdev) (rdev)->asic->fini((rdev)) #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) -#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) +#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) @@ -2450,16 +2462,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags))) -#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) -#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) -#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) -#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) -#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) -#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) -#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r)) -#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r)) -#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r)) +#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp)) +#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp)) +#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp)) +#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib)) +#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib)) +#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp)) +#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm)) +#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r)) +#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r)) +#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r)) #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) @@ -2467,8 +2479,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) #define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b)) #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) -#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) -#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) +#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) +#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 61c06449b31a..012fe7218c74 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev) /* * ASIC */ + +static struct radeon_asic_ring r100_gfx_ring = { + .ib_execute = &r100_ring_ib_execute, + .emit_fence = &r100_fence_ring_emit, + .emit_semaphore = 
&r100_semaphore_ring_emit, + .cs_parse = &r100_cs_parse, + .ring_start = &r100_ring_start, + .ring_test = &r100_ring_test, + .ib_test = &r100_ib_test, + .is_lockup = &r100_gpu_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic r100_asic = { .init = &r100_init, .fini = &r100_fini, @@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = { .set_page = &r100_pci_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r100_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r100_cs_parse, - .ring_start = &r100_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = { .set_page = &r100_pci_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r100_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r100_cs_parse, - .ring_start = &r100_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = { }, }; +static struct radeon_asic_ring r300_gfx_ring = { + .ib_execute = &r100_ring_ib_execute, + .emit_fence = &r300_fence_ring_emit, + .emit_semaphore = &r100_semaphore_ring_emit, + .cs_parse = &r300_cs_parse, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ib_test = &r100_ib_test, + .is_lockup = &r100_gpu_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic r300_asic = { .init = &r300_init, .fini = &r300_fini, @@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = { .set_page = &r100_pci_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = 
&radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = { .set_page = &rs400_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &r100_irq_set, @@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = { .set_page = &rs400_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &rv515_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = 
&r100_ring_ib_execute, - .emit_fence = &r300_fence_ring_emit, - .emit_semaphore = &r100_semaphore_ring_emit, - .cs_parse = &r300_cs_parse, - .ring_start = &rv515_ring_start, - .ring_test = &r100_ring_test, - .ib_test = &r100_ib_test, - .is_lockup = &r100_gpu_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = { }, }; +static struct radeon_asic_ring r600_gfx_ring = { + .ib_execute = &r600_ring_ib_execute, + .emit_fence = &r600_fence_ring_emit, + .emit_semaphore = &r600_semaphore_ring_emit, + .cs_parse = &r600_cs_parse, + .ring_test = &r600_ring_test, + .ib_test = &r600_ib_test, + .is_lockup = &r600_gfx_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + +static struct radeon_asic_ring r600_dma_ring = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = &r600_dma_cs_parse, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic r600_asic = { .init = &r600_init, .fini = &r600_fini, @@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r600_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &r600_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &r600_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &r600_dma_ring_ib_execute, - .emit_fence = &r600_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &r600_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &r600_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, }, .irq = { .set = &r600_irq_set, @@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r600_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &r600_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &r600_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &r600_dma_ring_ib_execute, - .emit_fence = &r600_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &r600_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &r600_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - 
.get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, }, .irq = { .set = &r600_irq_set, @@ -1187,30 +1078,8 @@ static struct radeon_asic rs780_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r600_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &r600_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &r600_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &r600_dma_ring_ib_execute, - .emit_fence = &r600_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &r600_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &r600_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, }, .irq = { .set = &r600_irq_set, @@ -1280,6 +1149,19 @@ static struct radeon_asic rs780_asic = { }, }; +static struct radeon_asic_ring rv770_uvd_ring = { + .ib_execute = &r600_uvd_ib_execute, + .emit_fence = &r600_uvd_fence_emit, + .emit_semaphore = &r600_uvd_semaphore_emit, + .cs_parse = &radeon_uvd_cs_parse, + .ring_test = &r600_uvd_ring_test, + .ib_test = &r600_uvd_ib_test, + .is_lockup = &radeon_ring_test_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic rv770_asic = { .init = &rv770_init, .fini = &rv770_fini, @@ -1297,42 +1179,9 @@ static struct radeon_asic rv770_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &r600_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &r600_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &r600_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &r600_dma_ring_ib_execute, - .emit_fence = &r600_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &r600_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &r600_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &r600_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, }, .irq = { .set = &r600_irq_set, @@ 
-1405,6 +1254,32 @@ static struct radeon_asic rv770_asic = { }, }; +static struct radeon_asic_ring evergreen_gfx_ring = { + .ib_execute = &evergreen_ring_ib_execute, + .emit_fence = &r600_fence_ring_emit, + .emit_semaphore = &r600_semaphore_ring_emit, + .cs_parse = &evergreen_cs_parse, + .ring_test = &r600_ring_test, + .ib_test = &r600_ib_test, + .is_lockup = &evergreen_gfx_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + +static struct radeon_asic_ring evergreen_dma_ring = { + .ib_execute = &evergreen_dma_ring_ib_execute, + .emit_fence = &evergreen_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = &evergreen_dma_cs_parse, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &evergreen_dma_is_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic evergreen_asic = { .init = &evergreen_init, .fini = &evergreen_fini, @@ -1422,42 +1297,9 @@ static struct radeon_asic evergreen_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &evergreen_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &evergreen_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &evergreen_dma_ring_ib_execute, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &evergreen_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &r600_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, }, .irq = { .set = &evergreen_irq_set, @@ -1547,42 +1389,9 @@ static struct radeon_asic sumo_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &evergreen_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &evergreen_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &evergreen_dma_ring_ib_execute, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test 
= &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &evergreen_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &r600_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, }, .irq = { .set = &evergreen_irq_set, @@ -1671,42 +1480,9 @@ static struct radeon_asic btc_asic = { .set_page = &rs600_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &evergreen_ring_ib_execute, - .emit_fence = &r600_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &evergreen_gfx_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &evergreen_dma_ring_ib_execute, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &evergreen_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &r600_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, }, .irq = { .set = &evergreen_irq_set, @@ -1779,6 +1555,49 @@ static struct radeon_asic btc_asic = { }, }; +static struct radeon_asic_ring cayman_gfx_ring = { + .ib_execute = &cayman_ring_ib_execute, + .ib_parse = &evergreen_ib_parse, + .emit_fence = &cayman_fence_ring_emit, + .emit_semaphore = &r600_semaphore_ring_emit, + .cs_parse = &evergreen_cs_parse, + .ring_test = &r600_ring_test, + .ib_test = &r600_ib_test, + .is_lockup = &cayman_gfx_is_lockup, + .vm_flush = &cayman_vm_flush, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + +static struct radeon_asic_ring cayman_dma_ring = { + .ib_execute = &cayman_dma_ring_ib_execute, + .ib_parse = &evergreen_dma_ib_parse, + .emit_fence = &evergreen_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = &evergreen_dma_cs_parse, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &cayman_dma_is_lockup, + .vm_flush = &cayman_dma_vm_flush, + .get_rptr = 
&radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr +}; + +static struct radeon_asic_ring cayman_uvd_ring = { + .ib_execute = &r600_uvd_ib_execute, + .emit_fence = &r600_uvd_fence_emit, + .emit_semaphore = &cayman_uvd_semaphore_emit, + .cs_parse = &radeon_uvd_cs_parse, + .ring_test = &r600_uvd_ring_test, + .ib_test = &r600_uvd_ib_test, + .is_lockup = &radeon_ring_test_lockup, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic cayman_asic = { .init = &cayman_init, .fini = &cayman_fini, @@ -1802,88 +1621,12 @@ static struct radeon_asic cayman_asic = { .set_page = &cayman_vm_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP1_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP2_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &cayman_dma_is_lockup, - .vm_flush = &cayman_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_DMA1_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &cayman_dma_is_lockup, - .vm_flush = &cayman_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, - .cs_parse = 
&radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, + [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring, + [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring, + [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, }, .irq = { .set = &evergreen_irq_set, @@ -1979,88 +1722,12 @@ static struct radeon_asic trinity_asic = { .set_page = &cayman_vm_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP1_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP2_INDEX] = { - .ib_execute = &cayman_ring_ib_execute, - .ib_parse = &evergreen_ib_parse, - .emit_fence = &cayman_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = &evergreen_cs_parse, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &cayman_gfx_is_lockup, - .vm_flush = &cayman_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &cayman_dma_is_lockup, - .vm_flush = &cayman_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_DMA1_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = &evergreen_dma_cs_parse, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &cayman_dma_is_lockup, - .vm_flush = &cayman_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test 
= &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, + [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring, + [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring, + [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, }, .irq = { .set = &evergreen_irq_set, @@ -2130,6 +1797,36 @@ static struct radeon_asic trinity_asic = { }, }; +static struct radeon_asic_ring si_gfx_ring = { + .ib_execute = &si_ring_ib_execute, + .ib_parse = &si_ib_parse, + .emit_fence = &si_fence_ring_emit, + .emit_semaphore = &r600_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_ring_test, + .ib_test = &r600_ib_test, + .is_lockup = &si_gfx_is_lockup, + .vm_flush = &si_vm_flush, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + +static struct radeon_asic_ring si_dma_ring = { + .ib_execute = &cayman_dma_ring_ib_execute, + .ib_parse = &evergreen_dma_ib_parse, + .emit_fence = &evergreen_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &si_dma_is_lockup, + .vm_flush = &si_dma_vm_flush, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic si_asic = { .init = &si_init, .fini = &si_fini, @@ -2153,88 +1850,12 @@ static struct radeon_asic si_asic = { .set_page = &si_vm_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &si_ring_ib_execute, - .ib_parse = &si_ib_parse, - .emit_fence = &si_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &si_gfx_is_lockup, - .vm_flush = &si_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP1_INDEX] = { - .ib_execute = &si_ring_ib_execute, - .ib_parse = &si_ib_parse, - .emit_fence = &si_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &si_gfx_is_lockup, - .vm_flush = &si_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP2_INDEX] = { - .ib_execute = &si_ring_ib_execute, - .ib_parse = &si_ib_parse, - .emit_fence = &si_fence_ring_emit, - .emit_semaphore = &r600_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &r600_ring_test, - .ib_test = &r600_ib_test, - .is_lockup = &si_gfx_is_lockup, - .vm_flush = &si_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &si_dma_is_lockup, - .vm_flush = 
&si_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_DMA1_INDEX] = { - .ib_execute = &cayman_dma_ring_ib_execute, - .ib_parse = &evergreen_dma_ib_parse, - .emit_fence = &evergreen_dma_fence_ring_emit, - .emit_semaphore = &r600_dma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &r600_dma_ring_test, - .ib_test = &r600_dma_ib_test, - .is_lockup = &si_dma_is_lockup, - .vm_flush = &si_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, + [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring, + [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring, + [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring, + [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, }, .irq = { .set = &si_irq_set, @@ -2305,6 +1926,51 @@ static struct radeon_asic si_asic = { }, }; +static struct radeon_asic_ring ci_gfx_ring = { + .ib_execute = &cik_ring_ib_execute, + .ib_parse = &cik_ib_parse, + .emit_fence = &cik_fence_gfx_ring_emit, + .emit_semaphore = &cik_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &cik_ring_test, + .ib_test = &cik_ib_test, + .is_lockup = &cik_gfx_is_lockup, + .vm_flush = &cik_vm_flush, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + +static struct radeon_asic_ring ci_cp_ring = { + .ib_execute = &cik_ring_ib_execute, + .ib_parse = &cik_ib_parse, + .emit_fence = &cik_fence_compute_ring_emit, + .emit_semaphore = &cik_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &cik_ring_test, + .ib_test = &cik_ib_test, + .is_lockup = &cik_gfx_is_lockup, + .vm_flush = &cik_vm_flush, + .get_rptr = &cik_compute_ring_get_rptr, + .get_wptr = &cik_compute_ring_get_wptr, + .set_wptr = &cik_compute_ring_set_wptr, +}; + +static struct radeon_asic_ring ci_dma_ring = { + .ib_execute = &cik_sdma_ring_ib_execute, + .ib_parse = &cik_ib_parse, + .emit_fence = &cik_sdma_fence_ring_emit, + .emit_semaphore = &cik_sdma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &cik_sdma_ring_test, + .ib_test = &cik_sdma_ib_test, + .is_lockup = &cik_sdma_is_lockup, + .vm_flush = &cik_dma_vm_flush, + .get_rptr = &radeon_ring_generic_get_rptr, + .get_wptr = &radeon_ring_generic_get_wptr, + .set_wptr = &radeon_ring_generic_set_wptr, +}; + static struct radeon_asic ci_asic = { .init = &cik_init, .fini = &cik_fini, @@ -2328,88 +1994,12 @@ static struct radeon_asic ci_asic = { .set_page = &cik_vm_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_gfx_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = 
&radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP1_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_compute_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &cik_compute_ring_get_rptr, - .get_wptr = &cik_compute_ring_get_wptr, - .set_wptr = &cik_compute_ring_set_wptr, - }, - [CAYMAN_RING_TYPE_CP2_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_compute_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &cik_compute_ring_get_rptr, - .get_wptr = &cik_compute_ring_get_wptr, - .set_wptr = &cik_compute_ring_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &cik_sdma_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_sdma_fence_ring_emit, - .emit_semaphore = &cik_sdma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_sdma_ring_test, - .ib_test = &cik_sdma_ib_test, - .is_lockup = &cik_sdma_is_lockup, - .vm_flush = &cik_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_DMA1_INDEX] = { - .ib_execute = &cik_sdma_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_sdma_fence_ring_emit, - .emit_semaphore = &cik_sdma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_sdma_ring_test, - .ib_test = &cik_sdma_ib_test, - .is_lockup = &cik_sdma_is_lockup, - .vm_flush = &cik_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, + [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring, + [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring, + [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, + [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, }, .irq = { .set = &cik_irq_set, @@ -2502,88 +2092,12 @@ static struct radeon_asic kv_asic = { .set_page = &cik_vm_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_gfx_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_CP1_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_compute_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = 
NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &cik_compute_ring_get_rptr, - .get_wptr = &cik_compute_ring_get_wptr, - .set_wptr = &cik_compute_ring_set_wptr, - }, - [CAYMAN_RING_TYPE_CP2_INDEX] = { - .ib_execute = &cik_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_fence_compute_ring_emit, - .emit_semaphore = &cik_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_ring_test, - .ib_test = &cik_ib_test, - .is_lockup = &cik_gfx_is_lockup, - .vm_flush = &cik_vm_flush, - .get_rptr = &cik_compute_ring_get_rptr, - .get_wptr = &cik_compute_ring_get_wptr, - .set_wptr = &cik_compute_ring_set_wptr, - }, - [R600_RING_TYPE_DMA_INDEX] = { - .ib_execute = &cik_sdma_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_sdma_fence_ring_emit, - .emit_semaphore = &cik_sdma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_sdma_ring_test, - .ib_test = &cik_sdma_ib_test, - .is_lockup = &cik_sdma_is_lockup, - .vm_flush = &cik_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [CAYMAN_RING_TYPE_DMA1_INDEX] = { - .ib_execute = &cik_sdma_ring_ib_execute, - .ib_parse = &cik_ib_parse, - .emit_fence = &cik_sdma_fence_ring_emit, - .emit_semaphore = &cik_sdma_semaphore_ring_emit, - .cs_parse = NULL, - .ring_test = &cik_sdma_ring_test, - .ib_test = &cik_sdma_ib_test, - .is_lockup = &cik_sdma_is_lockup, - .vm_flush = &cik_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - }, - [R600_RING_TYPE_UVD_INDEX] = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, - .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, - .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, - } + [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, + [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring, + [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring, + [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, + [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, }, .irq = { .set = &cik_irq_set, diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 5384fa42c16e..a56084410372 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) && + if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { DRM_ERROR("Ring %d requires VM!\n", p->ring); return -EINVAL; -- cgit v1.2.3 From 02c9f7fa4e7230fc4ae8bf26f64e45aa76011f9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Aug 2013 11:56:51 +0200 Subject: drm/radeon: rework UVD writeback & [rw]ptr handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hardware just doesn't support this correctly. Disable it before we accidentally write anywhere we shouldn't. 
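Concretely, the UVD ring now reads and commits its pointers straight from the MMIO registers instead of going through the writeback buffer. The accessors this patch adds in r600.c are essentially the following (taken from the r600.c hunk below):

uint32_t r600_uvd_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* read the ring read pointer directly from the UVD register */
	return RREG32(UVD_RBC_RB_RPTR);
}

uint32_t r600_uvd_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* read the ring write pointer directly from the UVD register */
	return RREG32(UVD_RBC_RB_WPTR);
}

void r600_uvd_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* commit the write pointer back to the hardware */
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
}

The rv770 and cayman UVD ring tables are then pointed at these helpers instead of the generic writeback-based ones, and R600_WB_UVD_RPTR_OFFSET goes away.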
Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 3 +-- drivers/gpu/drm/radeon/evergreen.c | 3 +-- drivers/gpu/drm/radeon/ni.c | 3 +-- drivers/gpu/drm/radeon/r600.c | 33 ++++++++++++++++++++------------- drivers/gpu/drm/radeon/radeon.h | 1 - drivers/gpu/drm/radeon/radeon_asic.c | 12 ++++++------ drivers/gpu/drm/radeon/radeon_asic.h | 6 ++++++ drivers/gpu/drm/radeon/radeon_ring.c | 2 +- drivers/gpu/drm/radeon/rv770.c | 3 +-- drivers/gpu/drm/radeon/si.c | 3 +-- 10 files changed, 38 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e661aec734b2..ce7036ae9f5a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7705,8 +7705,7 @@ static int cik_startup(struct radeon_device *rdev) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 710c1d4ae5db..2139f6c64341 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5291,8 +5291,7 @@ static int evergreen_startup(struct radeon_device *rdev) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index bc298a3500a4..f543f4ca4dda 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2225,8 +2225,7 @@ static int cayman_startup(struct radeon_device *rdev) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 8a600153ef6c..c1b0aba4431a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2623,31 +2623,38 @@ void r600_dma_fini(struct radeon_device *rdev) /* * UVD */ +uint32_t r600_uvd_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return RREG32(UVD_RBC_RB_RPTR); +} + +uint32_t r600_uvd_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return RREG32(UVD_RBC_RB_WPTR); +} + +void r600_uvd_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + WREG32(UVD_RBC_RB_WPTR, ring->wptr); +} + static int r600_uvd_rbc_start(struct radeon_device *rdev, bool ring_test) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - uint64_t rptr_addr; uint32_t rb_bufsz, tmp; int r; - rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET; - - if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) { - DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n"); - return -EINVAL; - } - /* force RBC into idle state */ WREG32(UVD_RBC_RB_CNTL, 0x11010101); /* Set the write pointer delay */ WREG32(UVD_RBC_RB_WPTR_CNTL, 0); - /* set the wb address */ - WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2); - /* programm the 4GB memory segment for rptr and ring buffer */ - WREG32(UVD_LMI_EXT40_ADDR, 
upper_32_bits(rptr_addr) | + WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | (0x7 << 16) | (0x1 << 31)); /* Initialize the ring buffer's read and write pointers */ @@ -2662,7 +2669,7 @@ static int r600_uvd_rbc_start(struct radeon_device *rdev, bool ring_test) /* Set ring buffer size */ rb_bufsz = drm_order(ring->ring_size); rb_bufsz = (0x1 << 8) | rb_bufsz; - WREG32(UVD_RBC_RB_CNTL, rb_bufsz); + WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f); if (ring_test) { ring->ready = true; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b26a20fe2859..2eab174bf22e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1027,7 +1027,6 @@ struct radeon_wb { #define R600_WB_DMA_RPTR_OFFSET 1792 #define R600_WB_IH_WPTR_OFFSET 2048 #define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 -#define R600_WB_UVD_RPTR_OFFSET 2560 #define R600_WB_EVENT_OFFSET 3072 #define CIK_WB_CP1_WPTR_OFFSET 3328 #define CIK_WB_CP2_WPTR_OFFSET 3584 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 012fe7218c74..7432247a812a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1157,9 +1157,9 @@ static struct radeon_asic_ring rv770_uvd_ring = { .ring_test = &r600_uvd_ring_test, .ib_test = &r600_uvd_ib_test, .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, + .get_rptr = &r600_uvd_get_rptr, + .get_wptr = &r600_uvd_get_wptr, + .set_wptr = &r600_uvd_set_wptr, }; static struct radeon_asic rv770_asic = { @@ -1593,9 +1593,9 @@ static struct radeon_asic_ring cayman_uvd_ring = { .ring_test = &r600_uvd_ring_test, .ib_test = &r600_uvd_ib_test, .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, + .get_rptr = &r600_uvd_get_rptr, + .get_wptr = &r600_uvd_get_wptr, + .set_wptr = &r600_uvd_set_wptr, }; static struct radeon_asic cayman_asic = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5630291c4b06..37baf9c696f0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -424,6 +424,12 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde struct seq_file *m); /* uvd */ +uint32_t r600_uvd_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring); +uint32_t r600_uvd_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); +void r600_uvd_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); int r600_uvd_init(struct radeon_device *rdev, bool ring_test); void r600_uvd_stop(struct radeon_device *rdev); int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index fb5ea6208970..cb4b931d8d9f 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -363,7 +363,7 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, { u32 rptr; - if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX]) + if (rdev->wb.enabled) rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); else rptr = RREG32(ring->rptr_reg); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 52253b2ab0d5..1e8cf49d5871 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ 
b/drivers/gpu/drm/radeon/rv770.c @@ -1923,8 +1923,7 @@ static int rv770_startup(struct radeon_device *rdev) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index da23ce8f4388..4ff59c8f508f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6416,8 +6416,7 @@ static int si_startup(struct radeon_device *rdev) if (rdev->has_uvd) { ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (!r) -- cgit v1.2.3 From 2e1e6dad6a6d437e4c40611fdcc4e6cd9e2f969e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Aug 2013 11:56:52 +0200 Subject: drm/radeon: remove special handling for the DMA ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have callbacks for [rw]ptr handling we can remove the special handling for the DMA rings and use the callbacks instead. Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 20 ++++++--------- drivers/gpu/drm/radeon/evergreen.c | 6 ++--- drivers/gpu/drm/radeon/ni.c | 8 +++--- drivers/gpu/drm/radeon/r100.c | 2 +- drivers/gpu/drm/radeon/r600.c | 47 ++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/radeon/radeon.h | 5 +--- drivers/gpu/drm/radeon/radeon_asic.c | 30 +++++++++++------------ drivers/gpu/drm/radeon/radeon_asic.h | 7 ++++++ drivers/gpu/drm/radeon/radeon_ring.c | 11 ++------- drivers/gpu/drm/radeon/rv770.c | 6 ++--- drivers/gpu/drm/radeon/si.c | 12 ++++----- 11 files changed, 95 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ce7036ae9f5a..34be795de173 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3414,7 +3414,6 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, cik_srbm_select(rdev, 0, 0, 0, 0); mutex_unlock(&rdev->srbm_mutex); } - rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; return rptr; } @@ -3433,7 +3432,6 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, cik_srbm_select(rdev, 0, 0, 0, 0); mutex_unlock(&rdev->srbm_mutex); } - wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; return wptr; } @@ -3441,10 +3439,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, void cik_compute_ring_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { - u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask; - - rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr); - WDOORBELL32(ring->doorbell_offset, wptr); + rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); + WDOORBELL32(ring->doorbell_offset, ring->wptr); } /** @@ -7649,7 +7645,7 @@ static int cik_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; @@ -7658,7 +7654,7 @@ static int cik_startup(struct radeon_device *rdev) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = 
radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, - 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF)); + PACKET3(PACKET3_NOP, 0x3FFF)); if (r) return r; ring->me = 1; /* first MEC */ @@ -7670,7 +7666,7 @@ static int cik_startup(struct radeon_device *rdev) ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, - 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF)); + PACKET3(PACKET3_NOP, 0x3FFF)); if (r) return r; /* dGPU only have 1 MEC */ @@ -7683,7 +7679,7 @@ static int cik_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET, SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET, - 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); if (r) return r; @@ -7691,7 +7687,7 @@ static int cik_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET, SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET, - 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); if (r) return r; @@ -7707,7 +7703,7 @@ static int cik_startup(struct radeon_device *rdev) if (ring->ring_size) { r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (!r) r = r600_uvd_init(rdev, true); if (r) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 2139f6c64341..389f5a981358 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5268,14 +5268,14 @@ static int evergreen_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR, DMA_RB_WPTR, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0)); if (r) return r; @@ -5293,7 +5293,7 @@ static int evergreen_startup(struct radeon_device *rdev) if (ring->ring_size) { r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (!r) r = r600_uvd_init(rdev, true); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index f543f4ca4dda..e04b17338336 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2192,7 +2192,7 @@ static int cayman_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; @@ -2200,7 +2200,7 @@ static int cayman_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR + DMA0_REGISTER_OFFSET, DMA_RB_WPTR + DMA0_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; @@ -2208,7 +2208,7 @@ static int cayman_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, DMA_RB_RPTR + 
DMA1_REGISTER_OFFSET, DMA_RB_WPTR + DMA1_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; @@ -2227,7 +2227,7 @@ static int cayman_startup(struct radeon_device *rdev) if (ring->ring_size) { r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (!r) r = r600_uvd_init(rdev, true); if (r) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 75349cdaa84b..2cbc512645d4 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1102,7 +1102,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) r100_cp_load_microcode(rdev); r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR, - 0, 0x7fffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c1b0aba4431a..30849eca6e07 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2504,6 +2504,49 @@ void r600_cp_fini(struct radeon_device *rdev) * solid fills, and a number of other things. It also * has support for tiling/detiling of buffers. */ + +/** + * r600_dma_get_rptr - get the current read pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Get the current rptr from the hardware (r6xx+). + */ +uint32_t r600_dma_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2; +} + +/** + * r600_dma_get_wptr - get the current write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Get the current wptr from the hardware (r6xx+). + */ +uint32_t r600_dma_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2; +} + +/** + * r600_dma_set_wptr - commit the write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Write the wptr back to the hardware (r6xx+). 
+ */ +void r600_dma_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc); +} + /** * r600_dma_stop - stop the async dma engine * @@ -3386,14 +3429,14 @@ static int r600_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR, DMA_RB_WPTR, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2eab174bf22e..791cc8de6395 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -760,8 +760,6 @@ struct radeon_ring { uint32_t align_mask; uint32_t ptr_mask; bool ready; - u32 ptr_reg_shift; - u32 ptr_reg_mask; u32 nop; u32 idx; u64 last_semaphore_signal_addr; @@ -912,8 +910,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, unsigned size, uint32_t *data); int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, - unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, - u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop); + unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop); void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 7432247a812a..785b7a7add77 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -896,9 +896,9 @@ static struct radeon_asic_ring r600_dma_ring = { .ring_test = &r600_dma_ring_test, .ib_test = &r600_dma_ib_test, .is_lockup = &r600_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, + .get_rptr = &r600_dma_get_rptr, + .get_wptr = &r600_dma_get_wptr, + .set_wptr = &r600_dma_set_wptr, }; static struct radeon_asic r600_asic = { @@ -1275,9 +1275,9 @@ static struct radeon_asic_ring evergreen_dma_ring = { .ring_test = &r600_dma_ring_test, .ib_test = &r600_dma_ib_test, .is_lockup = &evergreen_dma_is_lockup, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, + .get_rptr = &r600_dma_get_rptr, + .get_wptr = &r600_dma_get_wptr, + .set_wptr = &r600_dma_set_wptr, }; static struct radeon_asic evergreen_asic = { @@ -1580,9 +1580,9 @@ static struct radeon_asic_ring cayman_dma_ring = { .ib_test = &r600_dma_ib_test, .is_lockup = &cayman_dma_is_lockup, .vm_flush = &cayman_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr + .get_rptr = &r600_dma_get_rptr, + .get_wptr = &r600_dma_get_wptr, + .set_wptr = &r600_dma_set_wptr }; static struct radeon_asic_ring cayman_uvd_ring = { @@ -1822,9 +1822,9 @@ static struct radeon_asic_ring si_dma_ring = { .ib_test = &r600_dma_ib_test, .is_lockup = &si_dma_is_lockup, .vm_flush = &si_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = 
&radeon_ring_generic_set_wptr, + .get_rptr = &r600_dma_get_rptr, + .get_wptr = &r600_dma_get_wptr, + .set_wptr = &r600_dma_set_wptr, }; static struct radeon_asic si_asic = { @@ -1966,9 +1966,9 @@ static struct radeon_asic_ring ci_dma_ring = { .ib_test = &cik_sdma_ib_test, .is_lockup = &cik_sdma_is_lockup, .vm_flush = &cik_dma_vm_flush, - .get_rptr = &radeon_ring_generic_get_rptr, - .get_wptr = &radeon_ring_generic_get_wptr, - .set_wptr = &radeon_ring_generic_set_wptr, + .get_rptr = &r600_dma_get_rptr, + .get_wptr = &r600_dma_get_wptr, + .set_wptr = &r600_dma_set_wptr, }; static struct radeon_asic ci_asic = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 37baf9c696f0..5c53eb78b22d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -392,6 +392,13 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); int rv6xx_get_temp(struct radeon_device *rdev); int r600_dpm_pre_set_power_state(struct radeon_device *rdev); void r600_dpm_post_set_power_state(struct radeon_device *rdev); +/* r600 dma */ +uint32_t r600_dma_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring); +uint32_t r600_dma_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); +void r600_dma_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); /* rv6xx dpm */ int rv6xx_dpm_init(struct radeon_device *rdev); int rv6xx_dpm_enable(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index cb4b931d8d9f..46a25f037b84 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -367,7 +367,6 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); else rptr = RREG32(ring->rptr_reg); - rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; return rptr; } @@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, u32 wptr; wptr = RREG32(ring->wptr_reg); - wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; return wptr; } @@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, void radeon_ring_generic_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { - WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask); + WREG32(ring->wptr_reg, ring->wptr); (void)RREG32(ring->wptr_reg); } @@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, * @rptr_offs: offset of the rptr writeback location in the WB buffer * @rptr_reg: MMIO offset of the rptr register * @wptr_reg: MMIO offset of the wptr register - * @ptr_reg_shift: bit offset of the rptr/wptr values - * @ptr_reg_mask: bit mask of the rptr/wptr values * @nop: nop packet for this ring * * Initialize the driver information for the selected ring (all asics). * Returns 0 on success, error on failure. 
*/ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, - unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, - u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop) + unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop) { int r; @@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig ring->rptr_offs = rptr_offs; ring->rptr_reg = rptr_reg; ring->wptr_reg = wptr_reg; - ring->ptr_reg_shift = ptr_reg_shift; - ring->ptr_reg_mask = ptr_reg_mask; ring->nop = nop; /* Allocate ring buffer */ if (ring->ring_obj == NULL) { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 1e8cf49d5871..fd9dcb2d182b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1899,14 +1899,14 @@ static int rv770_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR, DMA_RB_WPTR, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; @@ -1925,7 +1925,7 @@ static int rv770_startup(struct radeon_device *rdev) if (ring->ring_size) { r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (!r) r = r600_uvd_init(rdev, true); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 4ff59c8f508f..ae232be62921 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6368,21 +6368,21 @@ static int si_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, CP_RB1_RPTR, CP_RB1_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, CP_RB2_RPTR, CP_RB2_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; @@ -6390,7 +6390,7 @@ static int si_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR + DMA0_REGISTER_OFFSET, DMA_RB_WPTR + DMA0_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; @@ -6398,7 +6398,7 @@ static int si_startup(struct radeon_device *rdev) r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, DMA_RB_RPTR + DMA1_REGISTER_OFFSET, DMA_RB_WPTR + DMA1_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; @@ -6418,7 +6418,7 @@ static int si_startup(struct radeon_device *rdev) if (ring->ring_size) { r = radeon_ring_init(rdev, ring, ring->ring_size, 0, UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (!r) r = r600_uvd_init(rdev, true); if (r) -- cgit v1.2.3 From 
e409b128625732926c112cc9b709fb7bb1aa387f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Aug 2013 11:56:53 +0200 Subject: drm/radeon: separate UVD code v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our different hardware blocks are actually completely separated, so it doesn't make much sense any more to structure the code by pure chipset generations. Start restructuring the code by separating out the UVD block. v2: updated commit message v3: rebased and restructured start/stop functions for kv dpm. Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 8 + drivers/gpu/drm/radeon/cik.c | 41 +--- drivers/gpu/drm/radeon/evergreen.c | 8 +- drivers/gpu/drm/radeon/kv_dpm.c | 11 +- drivers/gpu/drm/radeon/ni.c | 25 +- drivers/gpu/drm/radeon/r600.c | 345 ---------------------------- drivers/gpu/drm/radeon/radeon_asic.c | 32 +-- drivers/gpu/drm/radeon/radeon_asic.h | 55 +++-- drivers/gpu/drm/radeon/rv770.c | 105 +-------- drivers/gpu/drm/radeon/rv770d.h | 16 ++ drivers/gpu/drm/radeon/si.c | 8 +- drivers/gpu/drm/radeon/uvd_v1_0.c | 434 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/uvd_v2_2.c | 165 +++++++++++++ drivers/gpu/drm/radeon/uvd_v3_1.c | 55 +++++ drivers/gpu/drm/radeon/uvd_v4_2.c | 73 ++++++ 15 files changed, 826 insertions(+), 555 deletions(-) create mode 100644 drivers/gpu/drm/radeon/uvd_v1_0.c create mode 100644 drivers/gpu/drm/radeon/uvd_v2_2.c create mode 100644 drivers/gpu/drm/radeon/uvd_v3_1.c create mode 100644 drivers/gpu/drm/radeon/uvd_v4_2.c diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index ea913cc681b4..1e23b18d549a 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -82,6 +82,14 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ ci_dpm.o +# add UVD block +radeon-y += \ + radeon_uvd.o \ + uvd_v1_0.o \ + uvd_v2_2.o \ + uvd_v3_1.o \ + uvd_v4_2.o + radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 34be795de173..1400b5203db1 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -69,7 +69,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); static void cik_init_pg(struct radeon_device *rdev); static void cik_init_cg(struct radeon_device *rdev); -void cik_uvd_resume(struct radeon_device *rdev); /* get temperature in millidegrees */ int ci_get_temp(struct radeon_device *rdev) @@ -7616,9 +7615,8 @@ static int cik_startup(struct radeon_device *rdev) return r; } - r = radeon_uvd_resume(rdev); + r = uvd_v4_2_resume(rdev); if (!r) { - cik_uvd_resume(rdev); r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); if (r) @@ -7705,7 +7703,7 @@ static int cik_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev, true); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } @@ -7770,7 +7768,7 @@ int cik_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); cik_irq_suspend(rdev); 
radeon_wb_disable(rdev); @@ -7934,7 +7932,7 @@ void cik_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); cik_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); @@ -8595,37 +8593,6 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return r; } -void cik_uvd_resume(struct radeon_device *rdev) -{ - uint64_t addr; - uint32_t size; - - /* programm the VCPU memory controller bits 0-27 */ - addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET0, addr); - WREG32(UVD_VCPU_CACHE_SIZE0, size); - - addr += size; - size = RADEON_UVD_STACK_SIZE >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET1, addr); - WREG32(UVD_VCPU_CACHE_SIZE1, size); - - addr += size; - size = RADEON_UVD_HEAP_SIZE >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET2, addr); - WREG32(UVD_VCPU_CACHE_SIZE2, size); - - /* bits 28-31 */ - addr = (rdev->uvd.gpu_addr >> 28) & 0xF; - WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); - - /* bits 32-39 */ - addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; - WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); - -} - static void cik_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 389f5a981358..52ed22333f0d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5239,7 +5239,7 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -5295,7 +5295,7 @@ static int evergreen_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev, true); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: error initializing UVD (%d).\n", r); @@ -5350,7 +5350,7 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); @@ -5487,7 +5487,7 @@ void evergreen_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 3e232a4d3f4c..ef6c901690da 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -26,6 +26,7 @@ #include "cikd.h" #include "r600_dpm.h" #include "kv_dpm.h" +#include "radeon_asic.h" #include #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 @@ -59,10 +60,6 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); extern void cik_update_cg(struct radeon_device *rdev, u32 block, bool enable); -extern void cik_uvd_resume(struct radeon_device *rdev); -extern int r600_uvd_init(struct radeon_device *rdev, bool ring_test); -extern void r600_do_uvd_stop(struct radeon_device *rdev); - static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -1473,7 +1470,7 @@ void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) pi->uvd_power_gated = gate; if (gate) { - r600_do_uvd_stop(rdev); + uvd_v1_0_stop(rdev); 
cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); kv_update_uvd_dpm(rdev, gate); if (pi->caps_uvd_pg) @@ -1481,8 +1478,8 @@ void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) } else { if (pi->caps_uvd_pg) kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON); - cik_uvd_resume(rdev); - r600_uvd_init(rdev, false); + uvd_v4_2_resume(rdev); + uvd_v1_0_start(rdev); cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); kv_update_uvd_dpm(rdev, gate); } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index e04b17338336..0205fa1594fa 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1373,23 +1373,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, 10); /* poll interval */ } -void cayman_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); -} - static void cayman_cp_enable(struct radeon_device *rdev, bool enable) { if (enable) @@ -2141,7 +2124,7 @@ static int cayman_startup(struct radeon_device *rdev) return r; } - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -2229,7 +2212,7 @@ static int cayman_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev, true); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } @@ -2283,7 +2266,7 @@ int cayman_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -2414,7 +2397,7 @@ void cayman_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 30849eca6e07..3a08ef92d33f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2663,231 +2663,6 @@ void r600_dma_fini(struct radeon_device *rdev) radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); } -/* - * UVD - */ -uint32_t r600_uvd_get_rptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - return RREG32(UVD_RBC_RB_RPTR); -} - -uint32_t r600_uvd_get_wptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - return RREG32(UVD_RBC_RB_WPTR); -} - -void r600_uvd_set_wptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - WREG32(UVD_RBC_RB_WPTR, ring->wptr); -} - -static int r600_uvd_rbc_start(struct radeon_device *rdev, bool ring_test) -{ - struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - uint32_t rb_bufsz, tmp; - int r; - - /* force RBC into idle state */ - WREG32(UVD_RBC_RB_CNTL, 0x11010101); - - /* Set the write pointer delay */ - WREG32(UVD_RBC_RB_WPTR_CNTL, 0); - - /* programm the 4GB memory segment for rptr and ring 
buffer */ - WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | - (0x7 << 16) | (0x1 << 31)); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(UVD_RBC_RB_RPTR, 0x0); - - ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR); - WREG32(UVD_RBC_RB_WPTR, ring->wptr); - - /* set the ring address */ - WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); - - /* Set ring buffer size */ - rb_bufsz = drm_order(ring->ring_size); - rb_bufsz = (0x1 << 8) | rb_bufsz; - WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f); - - if (ring_test) { - ring->ready = true; - r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); - if (r) { - ring->ready = false; - return r; - } - - r = radeon_ring_lock(rdev, ring, 10); - if (r) { - DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); - return r; - } - - tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); - radeon_ring_write(ring, tmp); - radeon_ring_write(ring, 0xFFFFF); - - /* Clear timeout status bits */ - radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); - radeon_ring_write(ring, 0x8); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); - radeon_ring_write(ring, 3); - - radeon_ring_unlock_commit(rdev, ring); - } - - return 0; -} - -void r600_do_uvd_stop(struct radeon_device *rdev) -{ - /* force RBC into idle state */ - WREG32(UVD_RBC_RB_CNTL, 0x11010101); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); - mdelay(1); - - /* put VCPU into reset */ - WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); - mdelay(5); - - /* disable VCPU clock */ - WREG32(UVD_VCPU_CNTL, 0x0); - - /* Unstall UMC and register bus */ - WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); - WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); -} - -void r600_uvd_stop(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - - r600_do_uvd_stop(rdev); - ring->ready = false; -} - -int r600_uvd_init(struct radeon_device *rdev, bool ring_test) -{ - int i, j, r; - /* disable byte swapping */ - u32 lmi_swap_cntl = 0; - u32 mp_swap_cntl = 0; - - /* raise clocks while booting up the VCPU */ - radeon_set_uvd_clocks(rdev, 53300, 40000); - - /* disable clock gating */ - WREG32(UVD_CGC_GATE, 0); - - /* disable interupt */ - WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); - mdelay(1); - - /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | - LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | - CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET); - mdelay(5); - - /* take UVD block out of reset */ - WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD); - mdelay(5); - - /* initialize UVD memory controller */ - WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); - -#ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; -#endif - WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); - - WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); - WREG32(UVD_MPC_SET_MUXA1, 0x0); - WREG32(UVD_MPC_SET_MUXB0, 0x40c2040); - WREG32(UVD_MPC_SET_MUXB1, 0x0); - WREG32(UVD_MPC_SET_ALU, 0); - WREG32(UVD_MPC_SET_MUX, 0x88); - - /* take all subblocks out of reset, except VCPU */ - WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); - mdelay(5); - - /* enable VCPU clock */ - WREG32(UVD_VCPU_CNTL, 1 << 9); - - /* enable UMC */ - WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); - - /* boot up the VCPU */ - WREG32(UVD_SOFT_RESET, 0); - mdelay(10); - - WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); - - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(UVD_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET); - mdelay(10); - WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET); - mdelay(10); - r = -1; - } - - if (r) { - DRM_ERROR("UVD not responding, giving up!!!\n"); - goto done; - } - - /* enable interupt */ - WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1)); - - r = r600_uvd_rbc_start(rdev, ring_test); - if (!r) - DRM_INFO("UVD initialized successfully.\n"); - -done: - /* lower clocks again */ - radeon_set_uvd_clocks(rdev, 0, 0); - - return r; -} - /* * GPU scratch registers helpers function. 
*/ @@ -2997,40 +2772,6 @@ int r600_dma_ring_test(struct radeon_device *rdev, return r; } -int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) -{ - uint32_t tmp = 0; - unsigned i; - int r; - - WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD); - r = radeon_ring_lock(rdev, ring, 3); - if (r) { - DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", - ring->idx, r); - return r; - } - radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); - radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(UVD_CONTEXT_ID); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - - if (i < rdev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - return r; -} - /* * CP fences/semaphores */ @@ -3082,30 +2823,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev, } } -void r600_uvd_fence_emit(struct radeon_device *rdev, - struct radeon_fence *fence) -{ - struct radeon_ring *ring = &rdev->ring[fence->ring]; - uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); - radeon_ring_write(ring, fence->seq); - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); - radeon_ring_write(ring, addr & 0xffffffff); - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); - radeon_ring_write(ring, upper_32_bits(addr) & 0xff); - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); - radeon_ring_write(ring, 2); - return; -} - void r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, @@ -3175,23 +2892,6 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, upper_32_bits(addr) & 0xff); } -void r600_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, emit_wait ? 
1 : 0); -} - /** * r600_copy_cpdma - copy pages using the CP DMA engine * @@ -3656,16 +3356,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, ib->length_dw); } -void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - - radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0)); - radeon_ring_write(ring, ib->gpu_addr); - radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0)); - radeon_ring_write(ring, ib->length_dw); -} - int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) { struct radeon_ib ib; @@ -3783,41 +3473,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } -int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) -{ - struct radeon_fence *fence = NULL; - int r; - - r = radeon_set_uvd_clocks(rdev, 53300, 40000); - if (r) { - DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); - if (r) { - DRM_ERROR("radeon: failed to get create msg (%d).\n", r); - goto error; - } - - r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence); - if (r) { - DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = radeon_fence_wait(fence, false); - if (r) { - DRM_ERROR("radeon: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - radeon_fence_unref(&fence); - radeon_set_uvd_clocks(rdev, 0, 0); - return r; -} - /** * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine * diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 785b7a7add77..da755bf37421 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1150,16 +1150,16 @@ static struct radeon_asic rs780_asic = { }; static struct radeon_asic_ring rv770_uvd_ring = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &r600_uvd_semaphore_emit, + .ib_execute = &uvd_v1_0_ib_execute, + .emit_fence = &uvd_v2_2_fence_emit, + .emit_semaphore = &uvd_v1_0_semaphore_emit, .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, + .ring_test = &uvd_v1_0_ring_test, + .ib_test = &uvd_v1_0_ib_test, .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &r600_uvd_get_rptr, - .get_wptr = &r600_uvd_get_wptr, - .set_wptr = &r600_uvd_set_wptr, + .get_rptr = &uvd_v1_0_get_rptr, + .get_wptr = &uvd_v1_0_get_wptr, + .set_wptr = &uvd_v1_0_set_wptr, }; static struct radeon_asic rv770_asic = { @@ -1586,16 +1586,16 @@ static struct radeon_asic_ring cayman_dma_ring = { }; static struct radeon_asic_ring cayman_uvd_ring = { - .ib_execute = &r600_uvd_ib_execute, - .emit_fence = &r600_uvd_fence_emit, - .emit_semaphore = &cayman_uvd_semaphore_emit, + .ib_execute = &uvd_v1_0_ib_execute, + .emit_fence = &uvd_v2_2_fence_emit, + .emit_semaphore = &uvd_v3_1_semaphore_emit, .cs_parse = &radeon_uvd_cs_parse, - .ring_test = &r600_uvd_ring_test, - .ib_test = &r600_uvd_ib_test, + .ring_test = &uvd_v1_0_ring_test, + .ib_test = &uvd_v1_0_ib_test, .is_lockup = &radeon_ring_test_lockup, - .get_rptr = &r600_uvd_get_rptr, - .get_wptr = &r600_uvd_get_wptr, - .set_wptr = &r600_uvd_set_wptr, + .get_rptr = &uvd_v1_0_get_rptr, + .get_wptr = &uvd_v1_0_get_wptr, + .set_wptr = &uvd_v1_0_set_wptr, }; static struct radeon_asic cayman_asic = { diff --git 
a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5c53eb78b22d..e69f00a7f153 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -336,7 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); -int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int r600_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); @@ -430,24 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev, void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); -/* uvd */ -uint32_t r600_uvd_get_rptr(struct radeon_device *rdev, - struct radeon_ring *ring); -uint32_t r600_uvd_get_wptr(struct radeon_device *rdev, - struct radeon_ring *ring); -void r600_uvd_set_wptr(struct radeon_device *rdev, - struct radeon_ring *ring); -int r600_uvd_init(struct radeon_device *rdev, bool ring_test); -void r600_uvd_stop(struct radeon_device *rdev); -int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); -void r600_uvd_fence_emit(struct radeon_device *rdev, - struct radeon_fence *fence); -void r600_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait); -void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); - /* * rv770,rv730,rv710,rv740 */ @@ -465,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_fence **fence); u32 rv770_get_xclk(struct radeon_device *rdev); -int rv770_uvd_resume(struct radeon_device *rdev); int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int rv770_get_temp(struct radeon_device *rdev); /* rv7xx pm */ @@ -800,4 +780,39 @@ int kv_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); +/* uvd v1.0 */ +uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring); +uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); +void uvd_v1_0_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); + +int uvd_v1_0_init(struct radeon_device *rdev); +void uvd_v1_0_fini(struct radeon_device *rdev); +int uvd_v1_0_start(struct radeon_device *rdev); +void uvd_v1_0_stop(struct radeon_device *rdev); + +int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); +int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); +void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); +void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); + +/* uvd v2.2 */ +int uvd_v2_2_resume(struct radeon_device *rdev); +void uvd_v2_2_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence); + +/* uvd v3.1 */ +void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); + +/* uvd v4.2 */ +int uvd_v4_2_resume(struct radeon_device *rdev); + #endif diff --git a/drivers/gpu/drm/radeon/rv770.c 
b/drivers/gpu/drm/radeon/rv770.c index fd9dcb2d182b..aaab7b1bba27 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev) return reference_clock; } -int rv770_uvd_resume(struct radeon_device *rdev) -{ - uint64_t addr; - uint32_t chip_id, size; - int r; - - r = radeon_uvd_resume(rdev); - if (r) - return r; - - /* programm the VCPU memory controller bits 0-27 */ - addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET0, addr); - WREG32(UVD_VCPU_CACHE_SIZE0, size); - - addr += size; - size = RADEON_UVD_STACK_SIZE >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET1, addr); - WREG32(UVD_VCPU_CACHE_SIZE1, size); - - addr += size; - size = RADEON_UVD_HEAP_SIZE >> 3; - WREG32(UVD_VCPU_CACHE_OFFSET2, addr); - WREG32(UVD_VCPU_CACHE_SIZE2, size); - - /* bits 28-31 */ - addr = (rdev->uvd.gpu_addr >> 28) & 0xF; - WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); - - /* bits 32-39 */ - addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; - WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); - - /* tell firmware which hardware it is running on */ - switch (rdev->family) { - default: - return -EINVAL; - case CHIP_RV710: - chip_id = 0x01000005; - break; - case CHIP_RV730: - chip_id = 0x01000006; - break; - case CHIP_RV740: - chip_id = 0x01000007; - break; - case CHIP_CYPRESS: - case CHIP_HEMLOCK: - chip_id = 0x01000008; - break; - case CHIP_JUNIPER: - chip_id = 0x01000009; - break; - case CHIP_REDWOOD: - chip_id = 0x0100000a; - break; - case CHIP_CEDAR: - chip_id = 0x0100000b; - break; - case CHIP_SUMO: - case CHIP_SUMO2: - chip_id = 0x0100000c; - break; - case CHIP_PALM: - chip_id = 0x0100000e; - break; - case CHIP_CAYMAN: - chip_id = 0x0100000f; - break; - case CHIP_BARTS: - chip_id = 0x01000010; - break; - case CHIP_TURKS: - chip_id = 0x01000011; - break; - case CHIP_CAICOS: - chip_id = 0x01000012; - break; - case CHIP_TAHITI: - chip_id = 0x01000014; - break; - case CHIP_VERDE: - chip_id = 0x01000015; - break; - case CHIP_PITCAIRN: - chip_id = 0x01000016; - break; - case CHIP_ARUBA: - chip_id = 0x01000017; - break; - } - WREG32(UVD_VCPU_CHIP_ID, chip_id); - - return 0; -} - u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; @@ -1870,7 +1773,7 @@ static int rv770_startup(struct radeon_device *rdev) return r; } - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -1927,7 +1830,7 @@ static int rv770_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev, true); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); @@ -1977,7 +1880,7 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); @@ -2092,7 +1995,7 @@ void rv770_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 6bef2b7d601b..9fe60e542922 100644 --- 
a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -971,7 +971,21 @@ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) +/* + * PM4 + */ +#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ + (((reg) >> 2) & 0xFFFF) | \ + ((n) & 0x3FFF) << 16) +#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ + (((op) & 0xFF) << 8) | \ + ((n) & 0x3FFF) << 16) + /* UVD */ +#define UVD_GPCOM_VCPU_CMD 0xef0c +#define UVD_GPCOM_VCPU_DATA0 0xef10 +#define UVD_GPCOM_VCPU_DATA1 0xef14 + #define UVD_LMI_EXT40_ADDR 0xf498 #define UVD_VCPU_CHIP_ID 0xf4d4 #define UVD_VCPU_CACHE_OFFSET0 0xf4d8 @@ -985,4 +999,6 @@ #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 +#define UVD_CONTEXT_ID 0xf6f4 + #endif diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ae232be62921..f3f79089405e 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6339,7 +6339,7 @@ static int si_startup(struct radeon_device *rdev) } if (rdev->has_uvd) { - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -6420,7 +6420,7 @@ static int si_startup(struct radeon_device *rdev) UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev, true); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } @@ -6473,7 +6473,7 @@ int si_suspend(struct radeon_device *rdev) si_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); } si_irq_suspend(rdev); @@ -6616,7 +6616,7 @@ void si_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->has_uvd) { - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); } si_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c new file mode 100644 index 000000000000..76ca669f0c8e --- /dev/null +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -0,0 +1,434 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "r600d.h" + +/** + * uvd_v1_0_get_rptr - get read pointer + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * + * Returns the current hardware read pointer + */ +uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return RREG32(UVD_RBC_RB_RPTR); +} + +/** + * uvd_v1_0_get_wptr - get write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * + * Returns the current hardware write pointer + */ +uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return RREG32(UVD_RBC_RB_WPTR); +} + +/** + * uvd_v1_0_set_wptr - set write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * + * Commits the write pointer to the hardware + */ +void uvd_v1_0_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + WREG32(UVD_RBC_RB_WPTR, ring->wptr); +} + +/** + * uvd_v1_0_init - start and test UVD block + * + * @rdev: radeon_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +int uvd_v1_0_init(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + uint32_t tmp; + int r; + + /* raise clocks while booting up the VCPU */ + radeon_set_uvd_clocks(rdev, 53300, 40000); + + uvd_v1_0_start(rdev); + + ring->ready = true; + r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); + if (r) { + ring->ready = false; + goto done; + } + + r = radeon_ring_lock(rdev, ring, 10); + if (r) { + DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); + goto done; + } + + tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); + radeon_ring_write(ring, tmp); + radeon_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); + radeon_ring_write(ring, 0x8); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); + radeon_ring_write(ring, 3); + + radeon_ring_unlock_commit(rdev, ring); + +done: + /* lower clocks again */ + radeon_set_uvd_clocks(rdev, 0, 0); + + if (!r) + DRM_INFO("UVD initialized successfully.\n"); + + return r; +} + +/** + * uvd_v1_0_fini - stop the hardware block + * + * @rdev: radeon_device pointer + * + * Stop the UVD block, mark ring as not ready any more + */ +void uvd_v1_0_fini(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + + uvd_v1_0_stop(rdev); + ring->ready = false; +} + +/** + * uvd_v1_0_start - start UVD block + * + * @rdev: radeon_device pointer + * + * Setup and start the UVD block + */ +int uvd_v1_0_start(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + uint32_t rb_bufsz; + int i, j, r; + + /* disable byte swapping */ + u32 lmi_swap_cntl = 0; + u32 mp_swap_cntl = 0; + + /* disable clock gating */ + WREG32(UVD_CGC_GATE, 0); + + /* disable interupt */ + WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | + LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | + CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET); + mdelay(5); + + /* take UVD block out of reset */ + WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | + (1 << 21) | (1 << 9) | (1 << 20)); + +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; +#endif + WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); + + WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); + WREG32(UVD_MPC_SET_MUXA1, 0x0); + WREG32(UVD_MPC_SET_MUXB0, 0x40c2040); + WREG32(UVD_MPC_SET_MUXB1, 0x0); + WREG32(UVD_MPC_SET_ALU, 0); + WREG32(UVD_MPC_SET_MUX, 0x88); + + /* take all subblocks out of reset, except VCPU */ + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); + mdelay(5); + + /* enable VCPU clock */ + WREG32(UVD_VCPU_CNTL, 1 << 9); + + /* enable UMC */ + WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); + + /* boot up the VCPU */ + WREG32(UVD_SOFT_RESET, 0); + mdelay(10); + + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); + + for (i = 0; i < 10; ++i) { + uint32_t status; + for (j = 0; j < 100; ++j) { + status = RREG32(UVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); + WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET); + mdelay(10); + WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET); + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("UVD not responding, giving up!!!\n"); + return r; + } + + /* enable interupt */ + WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1)); + + /* force RBC into idle state */ + WREG32(UVD_RBC_RB_CNTL, 0x11010101); + + /* Set the write pointer delay */ + WREG32(UVD_RBC_RB_WPTR_CNTL, 0); + + /* programm the 4GB memory segment for rptr and ring buffer */ + WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | + (0x7 << 16) | (0x1 << 31)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(UVD_RBC_RB_RPTR, 0x0); + + ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR); + WREG32(UVD_RBC_RB_WPTR, ring->wptr); + + /* set the ring address */ + WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); + + /* Set ring buffer size */ + rb_bufsz = drm_order(ring->ring_size); + rb_bufsz = (0x1 << 8) | rb_bufsz; + WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f); + + return 0; +} + +/** + * uvd_v1_0_stop - stop UVD block + * + * @rdev: radeon_device pointer + * + * stop the UVD block + */ +void uvd_v1_0_stop(struct radeon_device *rdev) +{ + /* force RBC into idle state */ + WREG32(UVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + + /* put VCPU into reset */ + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); + mdelay(5); + + /* disable VCPU clock */ + WREG32(UVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); +} + +/** + * uvd_v1_0_ring_test - register write test + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * + * Test if we can successfully write to the context register + */ +int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD); + r = radeon_ring_lock(rdev, 
ring, 3); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", + ring->idx, r); + return r; + } + radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); + radeon_ring_write(ring, 0xDEADBEEF); + radeon_ring_unlock_commit(rdev, ring); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(UVD_CONTEXT_ID); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < rdev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * uvd_v1_0_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. + */ +void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, emit_wait ? 1 : 0); +} + +/** + * uvd_v1_0_ib_execute - execute indirect buffer + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer + */ +void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0)); + radeon_ring_write(ring, ib->gpu_addr); + radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0)); + radeon_ring_write(ring, ib->length_dw); +} + +/** + * uvd_v1_0_ib_test - test ib execution + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * + * Test if we can successfully execute an IB + */ +int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_fence *fence = NULL; + int r; + + r = radeon_set_uvd_clocks(rdev, 53300, 40000); + if (r) { + DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); + return r; + } + + r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); + if (r) { + DRM_ERROR("radeon: failed to get create msg (%d).\n", r); + goto error; + } + + r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence); + if (r) { + DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); + goto error; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + goto error; + } + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); +error: + radeon_fence_unref(&fence); + radeon_set_uvd_clocks(rdev, 0, 0); + return r; +} diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c new file mode 100644 index 000000000000..b19ef4951085 --- /dev/null +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -0,0 +1,165 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#include +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "rv770d.h" + +/** + * uvd_v2_2_fence_emit - emit an fence & trap command + * + * @rdev: radeon_device pointer + * @fence: fence to emit + * + * Write a fence and a trap command to the ring. + */ +void uvd_v2_2_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); + radeon_ring_write(ring, fence->seq); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); + radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); + radeon_ring_write(ring, upper_32_bits(addr) & 0xff); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); + radeon_ring_write(ring, 0); + + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); + radeon_ring_write(ring, 2); + return; +} + +/** + * uvd_v2_2_resume - memory controller programming + * + * @rdev: radeon_device pointer + * + * Let the UVD memory controller know it's offsets + */ +int uvd_v2_2_resume(struct radeon_device *rdev) +{ + uint64_t addr; + uint32_t chip_id, size; + int r; + + r = radeon_uvd_resume(rdev); + if (r) + return r; + + /* programm the VCPU memory controller bits 0-27 */ + addr = rdev->uvd.gpu_addr >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET0, addr); + WREG32(UVD_VCPU_CACHE_SIZE0, size); + + addr += size; + size = RADEON_UVD_STACK_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET1, addr); + WREG32(UVD_VCPU_CACHE_SIZE1, size); + + addr += size; + size = RADEON_UVD_HEAP_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET2, addr); + WREG32(UVD_VCPU_CACHE_SIZE2, size); + + /* bits 28-31 */ + addr = (rdev->uvd.gpu_addr >> 28) & 0xF; + WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); + + /* bits 32-39 */ + addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; + WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + + /* tell firmware which hardware it is running on */ + switch (rdev->family) { + default: + return -EINVAL; + case CHIP_RV710: + chip_id = 0x01000005; + break; + case CHIP_RV730: + chip_id = 
0x01000006; + break; + case CHIP_RV740: + chip_id = 0x01000007; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + chip_id = 0x01000008; + break; + case CHIP_JUNIPER: + chip_id = 0x01000009; + break; + case CHIP_REDWOOD: + chip_id = 0x0100000a; + break; + case CHIP_CEDAR: + chip_id = 0x0100000b; + break; + case CHIP_SUMO: + case CHIP_SUMO2: + chip_id = 0x0100000c; + break; + case CHIP_PALM: + chip_id = 0x0100000e; + break; + case CHIP_CAYMAN: + chip_id = 0x0100000f; + break; + case CHIP_BARTS: + chip_id = 0x01000010; + break; + case CHIP_TURKS: + chip_id = 0x01000011; + break; + case CHIP_CAICOS: + chip_id = 0x01000012; + break; + case CHIP_TAHITI: + chip_id = 0x01000014; + break; + case CHIP_VERDE: + chip_id = 0x01000015; + break; + case CHIP_PITCAIRN: + chip_id = 0x01000016; + break; + case CHIP_ARUBA: + chip_id = 0x01000017; + break; + } + WREG32(UVD_VCPU_CHIP_ID, chip_id); + + return 0; +} diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c new file mode 100644 index 000000000000..5b6fa1f62d4e --- /dev/null +++ b/drivers/gpu/drm/radeon/uvd_v3_1.c @@ -0,0 +1,55 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "nid.h" + +/** + * uvd_v3_1_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. + */ +void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); +} diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c new file mode 100644 index 000000000000..d7e480786098 --- /dev/null +++ b/drivers/gpu/drm/radeon/uvd_v4_2.c @@ -0,0 +1,73 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#include +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "cikd.h" + +/** + * uvd_v4_2_resume - memory controller programming + * + * @rdev: radeon_device pointer + * + * Let the UVD memory controller know it's offsets + */ +int uvd_v4_2_resume(struct radeon_device *rdev) +{ + uint64_t addr; + uint32_t size; + int r; + + r = radeon_uvd_resume(rdev); + if (r) + return r; + + /* programm the VCPU memory controller bits 0-27 */ + addr = rdev->uvd.gpu_addr >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET0, addr); + WREG32(UVD_VCPU_CACHE_SIZE0, size); + + addr += size; + size = RADEON_UVD_STACK_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET1, addr); + WREG32(UVD_VCPU_CACHE_SIZE1, size); + + addr += size; + size = RADEON_UVD_HEAP_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET2, addr); + WREG32(UVD_VCPU_CACHE_SIZE2, size); + + /* bits 28-31 */ + addr = (rdev->uvd.gpu_addr >> 28) & 0xF; + WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); + + /* bits 32-39 */ + addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; + WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + + return 0; +} -- cgit v1.2.3 From 2483b4ea982efe8a544697d3f9642932e9af4dc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Aug 2013 11:56:54 +0200 Subject: drm/radeon: separate DMA code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar to separating the UVD code, just put the DMA functions into separate files. 
Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 9 + drivers/gpu/drm/radeon/cik.c | 736 +------------------------------ drivers/gpu/drm/radeon/cik_sdma.c | 785 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/evergreen.c | 161 +------ drivers/gpu/drm/radeon/evergreen_dma.c | 190 ++++++++ drivers/gpu/drm/radeon/ni.c | 293 +----------- drivers/gpu/drm/radeon/ni_dma.c | 338 ++++++++++++++ drivers/gpu/drm/radeon/r600.c | 473 +------------------- drivers/gpu/drm/radeon/r600_dma.c | 497 +++++++++++++++++++++ drivers/gpu/drm/radeon/rv770.c | 74 ---- drivers/gpu/drm/radeon/rv770_dma.c | 101 +++++ drivers/gpu/drm/radeon/si.c | 188 +------- drivers/gpu/drm/radeon/si_dma.c | 235 ++++++++++ 13 files changed, 2181 insertions(+), 1899 deletions(-) create mode 100644 drivers/gpu/drm/radeon/cik_sdma.c create mode 100644 drivers/gpu/drm/radeon/evergreen_dma.c create mode 100644 drivers/gpu/drm/radeon/ni_dma.c create mode 100644 drivers/gpu/drm/radeon/r600_dma.c create mode 100644 drivers/gpu/drm/radeon/rv770_dma.c create mode 100644 drivers/gpu/drm/radeon/si_dma.c diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 1e23b18d549a..da2a8e9e9308 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -82,6 +82,15 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ ci_dpm.o +# add async DMA block +radeon-y += \ + r600_dma.o \ + rv770_dma.o \ + evergreen_dma.o \ + ni_dma.o \ + si_dma.o \ + cik_sdma.o \ + # add UVD block radeon-y += \ radeon_uvd.o \ diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 1400b5203db1..692e31b95d34 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -64,6 +64,14 @@ extern int sumo_rlc_init(struct radeon_device *rdev); extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void si_rlc_reset(struct radeon_device *rdev); extern void si_init_uvd_internal_cg(struct radeon_device *rdev); +extern int cik_sdma_resume(struct radeon_device *rdev); +extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); +extern void cik_sdma_fini(struct radeon_device *rdev); +extern void cik_sdma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); @@ -3987,579 +3995,6 @@ static int cik_cp_resume(struct radeon_device *rdev) return 0; } -/* - * sDMA - System DMA - * Starting with CIK, the GPU has new asynchronous - * DMA engines. These engines are used for compute - * and gfx. There are two DMA engines (SDMA0, SDMA1) - * and each one supports 1 ring buffer used for gfx - * and 2 queues used for compute. - * - * The programming model is very similar to the CP - * (ring buffer, IBs, etc.), but sDMA has it's own - * packet format that is different from the PM4 format - * used by the CP. sDMA supports copying data, writing - * embedded data, solid fills, and a number of other - * things. It also has support for tiling/detiling of - * buffers. - */ -/** - * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * - * Schedule an IB in the DMA ring (CIK). 
- */ -void cik_sdma_ring_ib_execute(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf; - - if (rdev->wb.enabled) { - u32 next_rptr = ring->wptr + 5; - while ((next_rptr & 7) != 4) - next_rptr++; - next_rptr += 4; - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - radeon_ring_write(ring, 1); /* number of DWs to follow */ - radeon_ring_write(ring, next_rptr); - } - - /* IB packet must end on a 8 DW boundary */ - while ((ring->wptr & 7) != 4) - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); - radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ - radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff); - radeon_ring_write(ring, ib->length_dw); - -} - -/** - * cik_sdma_fence_ring_emit - emit a fence on the DMA ring - * - * @rdev: radeon_device pointer - * @fence: radeon fence object - * - * Add a DMA fence packet to the ring to write - * the fence seq number and DMA trap packet to generate - * an interrupt if needed (CIK). - */ -void cik_sdma_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence) -{ - struct radeon_ring *ring = &rdev->ring[fence->ring]; - u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | - SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ - u32 ref_and_mask; - - if (fence->ring == R600_RING_TYPE_DMA_INDEX) - ref_and_mask = SDMA0; - else - ref_and_mask = SDMA1; - - /* write the fence */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); - radeon_ring_write(ring, addr & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); - radeon_ring_write(ring, fence->seq); - /* generate an interrupt */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); - /* flush HDP */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); - radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); - radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); - radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ - radeon_ring_write(ring, ref_and_mask); /* MASK */ - radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ -} - -/** - * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * @semaphore: radeon semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (CIK). - */ -void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S; - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); - radeon_ring_write(ring, addr & 0xfffffff8); - radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); -} - -/** - * cik_sdma_gfx_stop - stop the gfx async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the gfx async dma ring buffers (CIK). 
- */ -static void cik_sdma_gfx_stop(struct radeon_device *rdev) -{ - u32 rb_cntl, reg_offset; - int i; - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - - for (i = 0; i < 2; i++) { - if (i == 0) - reg_offset = SDMA0_REGISTER_OFFSET; - else - reg_offset = SDMA1_REGISTER_OFFSET; - rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset); - rb_cntl &= ~SDMA_RB_ENABLE; - WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); - WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); - } -} - -/** - * cik_sdma_rlc_stop - stop the compute async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the compute async dma queues (CIK). - */ -static void cik_sdma_rlc_stop(struct radeon_device *rdev) -{ - /* XXX todo */ -} - -/** - * cik_sdma_enable - stop the async dma engines - * - * @rdev: radeon_device pointer - * @enable: enable/disable the DMA MEs. - * - * Halt or unhalt the async dma engines (CIK). - */ -static void cik_sdma_enable(struct radeon_device *rdev, bool enable) -{ - u32 me_cntl, reg_offset; - int i; - - for (i = 0; i < 2; i++) { - if (i == 0) - reg_offset = SDMA0_REGISTER_OFFSET; - else - reg_offset = SDMA1_REGISTER_OFFSET; - me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset); - if (enable) - me_cntl &= ~SDMA_HALT; - else - me_cntl |= SDMA_HALT; - WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl); - } -} - -/** - * cik_sdma_gfx_resume - setup and start the async dma engines - * - * @rdev: radeon_device pointer - * - * Set up the gfx DMA ring buffers and enable them (CIK). - * Returns 0 for success, error for failure. - */ -static int cik_sdma_gfx_resume(struct radeon_device *rdev) -{ - struct radeon_ring *ring; - u32 rb_cntl, ib_cntl; - u32 rb_bufsz; - u32 reg_offset, wb_offset; - int i, r; - - for (i = 0; i < 2; i++) { - if (i == 0) { - ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; - reg_offset = SDMA0_REGISTER_OFFSET; - wb_offset = R600_WB_DMA_RPTR_OFFSET; - } else { - ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; - reg_offset = SDMA1_REGISTER_OFFSET; - wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; - } - - WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); - WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); - - /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); - rb_cntl = rb_bufsz << 1; -#ifdef __BIG_ENDIAN - rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; -#endif - WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0); - WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0); - - /* set the wb address whether it's enabled or not */ - WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset, - upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset, - ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); - - if (rdev->wb.enabled) - rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE; - - WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8); - WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40); - - ring->wptr = 0; - WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2); - - ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2; - - /* enable DMA RB */ - WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE); - - ib_cntl = SDMA_IB_ENABLE; -#ifdef __BIG_ENDIAN - ib_cntl |= SDMA_IB_SWAP_ENABLE; -#endif - /* enable DMA IBs */ - WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl); - - ring->ready = true; - - r = radeon_ring_test(rdev, ring->idx, ring); - if (r) { - ring->ready = false; - return r; 
- } - } - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - - return 0; -} - -/** - * cik_sdma_rlc_resume - setup and start the async dma engines - * - * @rdev: radeon_device pointer - * - * Set up the compute DMA queues and enable them (CIK). - * Returns 0 for success, error for failure. - */ -static int cik_sdma_rlc_resume(struct radeon_device *rdev) -{ - /* XXX todo */ - return 0; -} - -/** - * cik_sdma_load_microcode - load the sDMA ME ucode - * - * @rdev: radeon_device pointer - * - * Loads the sDMA0/1 ucode. - * Returns 0 for success, -EINVAL if the ucode is not available. - */ -static int cik_sdma_load_microcode(struct radeon_device *rdev) -{ - const __be32 *fw_data; - int i; - - if (!rdev->sdma_fw) - return -EINVAL; - - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - - /* halt the MEs */ - cik_sdma_enable(rdev, false); - - /* sdma0 */ - fw_data = (const __be32 *)rdev->sdma_fw->data; - WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); - for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) - WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++)); - WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); - - /* sdma1 */ - fw_data = (const __be32 *)rdev->sdma_fw->data; - WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); - for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) - WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++)); - WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); - - WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); - WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); - return 0; -} - -/** - * cik_sdma_resume - setup and start the async dma engines - * - * @rdev: radeon_device pointer - * - * Set up the DMA engines and enable them (CIK). - * Returns 0 for success, error for failure. - */ -static int cik_sdma_resume(struct radeon_device *rdev) -{ - int r; - - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - RREG32(SRBM_SOFT_RESET); - - r = cik_sdma_load_microcode(rdev); - if (r) - return r; - - /* unhalt the MEs */ - cik_sdma_enable(rdev, true); - - /* start the gfx rings and rlc compute queues */ - r = cik_sdma_gfx_resume(rdev); - if (r) - return r; - r = cik_sdma_rlc_resume(rdev); - if (r) - return r; - - return 0; -} - -/** - * cik_sdma_fini - tear down the async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the async dma engines and free the rings (CIK). - */ -static void cik_sdma_fini(struct radeon_device *rdev) -{ - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - /* halt the MEs */ - cik_sdma_enable(rdev, false); - radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); - radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); - /* XXX - compute dma queue tear down */ -} - -/** - * cik_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (CIK). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. 
- */ -int cik_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_bytes, cur_size_in_bytes; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); - num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); - r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_bytes = size_in_bytes; - if (cur_size_in_bytes > 0x1fffff) - cur_size_in_bytes = 0x1fffff; - size_in_bytes -= cur_size_in_bytes; - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)); - radeon_ring_write(ring, cur_size_in_bytes); - radeon_ring_write(ring, 0); /* src/dst endian swap */ - radeon_ring_write(ring, src_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); - radeon_ring_write(ring, dst_offset & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); - src_offset += cur_size_in_bytes; - dst_offset += cur_size_in_bytes; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - -/** - * cik_sdma_ring_test - simple async dma engine test - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Test the DMA engine by writing using it to write an - * value to memory. (CIK). - * Returns 0 for success, error for failure. 
- */ -int cik_sdma_ring_test(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - unsigned i; - int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; - u32 tmp; - - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } - - tmp = 0xCAFEDEAD; - writel(tmp, ptr); - - r = radeon_ring_lock(rdev, ring, 4); - if (r) { - DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); - return r; - } - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff); - radeon_ring_write(ring, 1); /* number of DWs to follow */ - radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); - - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - - if (i < rdev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - return r; -} - -/** - * cik_sdma_ib_test - test an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Test a simple IB in the DMA ring (CIK). - * Returns 0 on success, error on failure. - */ -int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) -{ - struct radeon_ib ib; - unsigned i; - int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; - u32 tmp = 0; - - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } - - tmp = 0xCAFEDEAD; - writel(tmp, ptr); - - r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); - if (r) { - DRM_ERROR("radeon: failed to get ib (%d).\n", r); - return r; - } - - ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff; - ib.ptr[3] = 1; - ib.ptr[4] = 0xDEADBEEF; - ib.length_dw = 5; - - r = radeon_ib_schedule(rdev, &ib, NULL); - if (r) { - radeon_ib_free(rdev, &ib); - DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); - return r; - } - r = radeon_fence_wait(ib.fence, false); - if (r) { - DRM_ERROR("radeon: fence wait failed (%d).\n", r); - return r; - } - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < rdev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); - } else { - DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); - r = -EINVAL; - } - radeon_ib_free(rdev, &ib); - return r; -} - - static void cik_print_gpu_status_regs(struct radeon_device *rdev) { dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", @@ -4609,7 +4044,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev) * mask to be used by cik_gpu_soft_reset(). * Returns a mask of the blocks to be reset. 
*/ -static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev) +u32 cik_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -4860,34 +4295,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } -/** - * cik_sdma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up (CIK). - * Returns true if the engine appears to be locked up, false if not. - */ -bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = cik_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - /* MC */ /** * cik_mc_program - program the GPU memory controller @@ -5424,131 +4831,8 @@ void cik_vm_set_page(struct radeon_device *rdev, } } else { /* DMA */ - if (flags & RADEON_VM_PAGE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); - } -} - -/** - * cik_dma_vm_flush - cik vm flush using sDMA - * - * @rdev: radeon_device pointer - * - * Update the page table base and flush the VM TLB - * using sDMA (CIK). 
- */ -void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) -{ - struct radeon_ring *ring = &rdev->ring[ridx]; - u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | - SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ - u32 ref_and_mask; - - if (vm == NULL) - return; - - if (ridx == R600_RING_TYPE_DMA_INDEX) - ref_and_mask = SDMA0; - else - ref_and_mask = SDMA1; - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - if (vm->id < 8) { - radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); - } else { - radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); + cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); } - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - - /* update SH_MEM_* regs */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); - radeon_ring_write(ring, VMID(vm->id)); - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SH_MEM_BASES >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SH_MEM_CONFIG >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2); - radeon_ring_write(ring, 1); - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); - radeon_ring_write(ring, VMID(0)); - - /* flush HDP */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); - radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); - radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); - radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ - radeon_ring_write(ring, ref_and_mask); /* MASK */ - radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ - - /* flush TLB */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); - radeon_ring_write(ring, 1 << vm->id); } /* diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c new file mode 100644 index 000000000000..8925185a0049 --- /dev/null +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -0,0 +1,785 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "cikd.h" + +/* sdma */ +#define CIK_SDMA_UCODE_SIZE 1050 +#define CIK_SDMA_UCODE_VERSION 64 + +u32 cik_gpu_check_soft_reset(struct radeon_device *rdev); + +/* + * sDMA - System DMA + * Starting with CIK, the GPU has new asynchronous + * DMA engines. These engines are used for compute + * and gfx. There are two DMA engines (SDMA0, SDMA1) + * and each one supports 1 ring buffer used for gfx + * and 2 queues used for compute. + * + * The programming model is very similar to the CP + * (ring buffer, IBs, etc.), but sDMA has it's own + * packet format that is different from the PM4 format + * used by the CP. sDMA supports copying data, writing + * embedded data, solid fills, and a number of other + * things. It also has support for tiling/detiling of + * buffers. + */ + +/** + * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (CIK). + */ +void cik_sdma_ring_ib_execute(struct radeon_device *rdev, + struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 5; + while ((next_rptr & 7) != 4) + next_rptr++; + next_rptr += 4; + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, 1); /* number of DWs to follow */ + radeon_ring_write(ring, next_rptr); + } + + /* IB packet must end on a 8 DW boundary */ + while ((ring->wptr & 7) != 4) + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); + radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ + radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff); + radeon_ring_write(ring, ib->length_dw); + +} + +/** + * cik_sdma_fence_ring_emit - emit a fence on the DMA ring + * + * @rdev: radeon_device pointer + * @fence: radeon fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (CIK). 
+ */ +void cik_sdma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ + u32 ref_and_mask; + + if (fence->ring == R600_RING_TYPE_DMA_INDEX) + ref_and_mask = SDMA0; + else + ref_and_mask = SDMA1; + + /* write the fence */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); + radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + radeon_ring_write(ring, fence->seq); + /* generate an interrupt */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); + /* flush HDP */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); + radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ + radeon_ring_write(ring, ref_and_mask); /* MASK */ + radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ +} + +/** + * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @semaphore: radeon semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (CIK). + */ +void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S; + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); + radeon_ring_write(ring, addr & 0xfffffff8); + radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); +} + +/** + * cik_sdma_gfx_stop - stop the gfx async dma engines + * + * @rdev: radeon_device pointer + * + * Stop the gfx async dma ring buffers (CIK). + */ +static void cik_sdma_gfx_stop(struct radeon_device *rdev) +{ + u32 rb_cntl, reg_offset; + int i; + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + + for (i = 0; i < 2; i++) { + if (i == 0) + reg_offset = SDMA0_REGISTER_OFFSET; + else + reg_offset = SDMA1_REGISTER_OFFSET; + rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset); + rb_cntl &= ~SDMA_RB_ENABLE; + WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); + WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); + } +} + +/** + * cik_sdma_rlc_stop - stop the compute async dma engines + * + * @rdev: radeon_device pointer + * + * Stop the compute async dma queues (CIK). + */ +static void cik_sdma_rlc_stop(struct radeon_device *rdev) +{ + /* XXX todo */ +} + +/** + * cik_sdma_enable - stop the async dma engines + * + * @rdev: radeon_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines (CIK). + */ +void cik_sdma_enable(struct radeon_device *rdev, bool enable) +{ + u32 me_cntl, reg_offset; + int i; + + for (i = 0; i < 2; i++) { + if (i == 0) + reg_offset = SDMA0_REGISTER_OFFSET; + else + reg_offset = SDMA1_REGISTER_OFFSET; + me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset); + if (enable) + me_cntl &= ~SDMA_HALT; + else + me_cntl |= SDMA_HALT; + WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl); + } +} + +/** + * cik_sdma_gfx_resume - setup and start the async dma engines + * + * @rdev: radeon_device pointer + * + * Set up the gfx DMA ring buffers and enable them (CIK). 
+ * Returns 0 for success, error for failure. + */ +static int cik_sdma_gfx_resume(struct radeon_device *rdev) +{ + struct radeon_ring *ring; + u32 rb_cntl, ib_cntl; + u32 rb_bufsz; + u32 reg_offset, wb_offset; + int i, r; + + for (i = 0; i < 2; i++) { + if (i == 0) { + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + reg_offset = SDMA0_REGISTER_OFFSET; + wb_offset = R600_WB_DMA_RPTR_OFFSET; + } else { + ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; + reg_offset = SDMA1_REGISTER_OFFSET; + wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; + } + + WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); + WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = drm_order(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0); + WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset, + upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset, + ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); + + if (rdev->wb.enabled) + rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE; + + WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8); + WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40); + + ring->wptr = 0; + WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2); + + ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2; + + /* enable DMA RB */ + WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE); + + ib_cntl = SDMA_IB_ENABLE; +#ifdef __BIG_ENDIAN + ib_cntl |= SDMA_IB_SWAP_ENABLE; +#endif + /* enable DMA IBs */ + WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl); + + ring->ready = true; + + r = radeon_ring_test(rdev, ring->idx, ring); + if (r) { + ring->ready = false; + return r; + } + } + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + + return 0; +} + +/** + * cik_sdma_rlc_resume - setup and start the async dma engines + * + * @rdev: radeon_device pointer + * + * Set up the compute DMA queues and enable them (CIK). + * Returns 0 for success, error for failure. + */ +static int cik_sdma_rlc_resume(struct radeon_device *rdev) +{ + /* XXX todo */ + return 0; +} + +/** + * cik_sdma_load_microcode - load the sDMA ME ucode + * + * @rdev: radeon_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. 
+ */ +static int cik_sdma_load_microcode(struct radeon_device *rdev) +{ + const __be32 *fw_data; + int i; + + if (!rdev->sdma_fw) + return -EINVAL; + + /* stop the gfx rings and rlc compute queues */ + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + + /* halt the MEs */ + cik_sdma_enable(rdev, false); + + /* sdma0 */ + fw_data = (const __be32 *)rdev->sdma_fw->data; + WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); + for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + + /* sdma1 */ + fw_data = (const __be32 *)rdev->sdma_fw->data; + WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); + for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + + WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); + WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); + return 0; +} + +/** + * cik_sdma_resume - setup and start the async dma engines + * + * @rdev: radeon_device pointer + * + * Set up the DMA engines and enable them (CIK). + * Returns 0 for success, error for failure. + */ +int cik_sdma_resume(struct radeon_device *rdev) +{ + int r; + + /* Reset dma */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + RREG32(SRBM_SOFT_RESET); + + r = cik_sdma_load_microcode(rdev); + if (r) + return r; + + /* unhalt the MEs */ + cik_sdma_enable(rdev, true); + + /* start the gfx rings and rlc compute queues */ + r = cik_sdma_gfx_resume(rdev); + if (r) + return r; + r = cik_sdma_rlc_resume(rdev); + if (r) + return r; + + return 0; +} + +/** + * cik_sdma_fini - tear down the async dma engines + * + * @rdev: radeon_device pointer + * + * Stop the async dma engines and free the rings (CIK). + */ +void cik_sdma_fini(struct radeon_device *rdev) +{ + /* stop the gfx rings and rlc compute queues */ + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + /* halt the MEs */ + cik_sdma_enable(rdev, false); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); + radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); + /* XXX - compute dma queue tear down */ +} + +/** + * cik_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (CIK). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. 
+ */ +int cik_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_bytes, cur_size_in_bytes; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); + num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); + r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_bytes = size_in_bytes; + if (cur_size_in_bytes > 0x1fffff) + cur_size_in_bytes = 0x1fffff; + size_in_bytes -= cur_size_in_bytes; + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)); + radeon_ring_write(ring, cur_size_in_bytes); + radeon_ring_write(ring, 0); /* src/dst endian swap */ + radeon_ring_write(ring, src_offset & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); + src_offset += cur_size_in_bytes; + dst_offset += cur_size_in_bytes; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + +/** + * cik_sdma_ring_test - simple async dma engine test + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. (CIK). + * Returns 0 for success, error for failure. 
+ */ +int cik_sdma_ring_test(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ring_lock(rdev, ring, 4); + if (r) { + DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); + return r; + } + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); + radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff); + radeon_ring_write(ring, 1); /* number of DWs to follow */ + radeon_ring_write(ring, 0xDEADBEEF); + radeon_ring_unlock_commit(rdev, ring); + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < rdev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * cik_sdma_ib_test - test an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test a simple IB in the DMA ring (CIK). + * Returns 0 on success, error on failure. + */ +int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_ib ib; + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp = 0; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; + ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff; + ib.ptr[3] = 1; + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + radeon_ib_free(rdev, &ib); + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + return r; + } + r = radeon_fence_wait(ib.fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < rdev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); + } else { + DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + radeon_ib_free(rdev, &ib); + return r; +} + +/** + * cik_sdma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up (CIK). + * Returns true if the engine appears to be locked up, false if not. 
+ */ +bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 reset_mask = cik_gpu_check_soft_reset(rdev); + u32 mask; + + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + mask = RADEON_RESET_DMA; + else + mask = RADEON_RESET_DMA1; + + if (!(reset_mask & mask)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + +/** + * cik_sdma_vm_set_page - update the page tables using sDMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). + */ +void cik_sdma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); + uint64_t value; + unsigned ndw; + + if (flags & RADEON_VM_PAGE_SYSTEM) { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & RADEON_VM_PAGE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & RADEON_VM_PAGE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= r600_flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } + } else { + while (count) { + ndw = count; + if (ndw > 0x7FFFF) + ndw = 0x7FFFF; + + if (flags & RADEON_VM_PAGE_VALID) + value = addr; + else + value = 0; + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = r600_flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = ndw; /* number of entries */ + pe += ndw * 8; + addr += ndw * incr; + count -= ndw; + } + } + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); +} + +/** + * cik_dma_vm_flush - cik vm flush using sDMA + * + * @rdev: radeon_device pointer + * + * Update the page table base and flush the VM TLB + * using sDMA (CIK). 
+ */ +void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +{ + struct radeon_ring *ring = &rdev->ring[ridx]; + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ + u32 ref_and_mask; + + if (vm == NULL) + return; + + if (ridx == R600_RING_TYPE_DMA_INDEX) + ref_and_mask = SDMA0; + else + ref_and_mask = SDMA1; + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + if (vm->id < 8) { + radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); + } else { + radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); + } + radeon_ring_write(ring, vm->pd_gpu_addr >> 12); + + /* update SH_MEM_* regs */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); + radeon_ring_write(ring, VMID(vm->id)); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SH_MEM_BASES >> 2); + radeon_ring_write(ring, 0); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SH_MEM_CONFIG >> 2); + radeon_ring_write(ring, 0); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2); + radeon_ring_write(ring, 1); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2); + radeon_ring_write(ring, 0); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); + radeon_ring_write(ring, VMID(0)); + + /* flush HDP */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); + radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ + radeon_ring_write(ring, ref_and_mask); /* MASK */ + radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ + + /* flush TLB */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 1 << vm->id); +} + diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 52ed22333f0d..bbaa4f2056ce 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3613,7 +3613,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev) return true; } -static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) +u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -3839,28 +3839,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin return radeon_ring_test_lockup(rdev, ring); } -/** - * evergreen_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. 
- */ -bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); - - if (!(reset_mask & RADEON_RESET_DMA)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - /* * RLC */ @@ -5024,143 +5002,6 @@ restart_ih: return IRQ_HANDLED; } -/** - * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring - * - * @rdev: radeon_device pointer - * @fence: radeon fence object - * - * Add a DMA fence packet to the ring to write - * the fence seq number and DMA trap packet to generate - * an interrupt if needed (evergreen-SI). - */ -void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence) -{ - struct radeon_ring *ring = &rdev->ring[fence->ring]; - u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - /* write the fence */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0)); - radeon_ring_write(ring, addr & 0xfffffffc); - radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); - radeon_ring_write(ring, fence->seq); - /* generate an interrupt */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0)); - /* flush HDP */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); - radeon_ring_write(ring, 1); -} - -/** - * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * - * Schedule an IB in the DMA ring (evergreen). - */ -void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - - if (rdev->wb.enabled) { - u32 next_rptr = ring->wptr + 4; - while ((next_rptr & 7) != 5) - next_rptr++; - next_rptr += 3; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1)); - radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); - radeon_ring_write(ring, next_rptr); - } - - /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. - * Pad as necessary with NOPs. - */ - while ((ring->wptr & 7) != 5) - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0)); - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0)); - radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); - radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); - -} - -/** - * evergreen_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (evergreen-cayman). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. 
- */ -int evergreen_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_dw, cur_size_in_dw; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; - num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_dw = size_in_dw; - if (cur_size_in_dw > 0xFFFFF) - cur_size_in_dw = 0xFFFFF; - size_in_dw -= cur_size_in_dw; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw)); - radeon_ring_write(ring, dst_offset & 0xfffffffc); - radeon_ring_write(ring, src_offset & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); - src_offset += cur_size_in_dw * 4; - dst_offset += cur_size_in_dw * 4; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - static int evergreen_startup(struct radeon_device *rdev) { struct radeon_ring *ring; diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c new file mode 100644 index 000000000000..6a0656d00ed0 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -0,0 +1,190 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "evergreend.h" + +u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev); + +/** + * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring + * + * @rdev: radeon_device pointer + * @fence: radeon fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (evergreen-SI). + */ +void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + /* write the fence */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0)); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); + radeon_ring_write(ring, fence->seq); + /* generate an interrupt */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0)); + /* flush HDP */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); + radeon_ring_write(ring, 1); +} + +/** + * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (evergreen). + */ +void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, + struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 4; + while ((next_rptr & 7) != 5) + next_rptr++; + next_rptr += 3; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); + radeon_ring_write(ring, next_rptr); + } + + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. + */ + while ((ring->wptr & 7) != 5) + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0)); + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0)); + radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + +/** + * evergreen_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (evergreen-cayman). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. 
+ */ +int evergreen_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_dw, cur_size_in_dw; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; + num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); + r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_dw = size_in_dw; + if (cur_size_in_dw > 0xFFFFF) + cur_size_in_dw = 0xFFFFF; + size_in_dw -= cur_size_in_dw; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw)); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, src_offset & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + src_offset += cur_size_in_dw * 4; + dst_offset += cur_size_in_dw * 4; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + +/** + * evergreen_dma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up. + * Returns true if the engine appears to be locked up, false if not. + */ +bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); + + if (!(reset_mask & RADEON_RESET_DMA)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + + diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 0205fa1594fa..2db8ce0023ac 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -174,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); extern void evergreen_program_aspm(struct radeon_device *rdev); extern void sumo_rlc_fini(struct radeon_device *rdev); extern int sumo_rlc_init(struct radeon_device *rdev); +extern void cayman_dma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); /* Firmware Names */ MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); @@ -1595,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev) return 0; } -/* - * DMA - * Starting with R600, the GPU has an asynchronous - * DMA engine. The programming model is very similar - * to the 3D engine (ring buffer, IBs, etc.), but the - * DMA controller has it's own packet format that is - * different form the PM4 format used by the 3D engine. 
- * It supports copying data, writing embedded data, - * solid fills, and a number of other things. It also - * has support for tiling/detiling of buffers. - * Cayman and newer support two asynchronous DMA engines. - */ -/** - * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * - * Schedule an IB in the DMA ring (cayman-SI). - */ -void cayman_dma_ring_ib_execute(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - - if (rdev->wb.enabled) { - u32 next_rptr = ring->wptr + 4; - while ((next_rptr & 7) != 5) - next_rptr++; - next_rptr += 3; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); - radeon_ring_write(ring, next_rptr); - } - - /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. - * Pad as necessary with NOPs. - */ - while ((ring->wptr & 7) != 5) - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); - radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); - radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); - radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); - -} - -/** - * cayman_dma_stop - stop the async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the async dma engines (cayman-SI). - */ -void cayman_dma_stop(struct radeon_device *rdev) -{ - u32 rb_cntl; - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - - /* dma0 */ - rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); - rb_cntl &= ~DMA_RB_ENABLE; - WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); - - /* dma1 */ - rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); - rb_cntl &= ~DMA_RB_ENABLE; - WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); - - rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; - rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; -} - -/** - * cayman_dma_resume - setup and start the async dma engines - * - * @rdev: radeon_device pointer - * - * Set up the DMA ring buffers and enable them. (cayman-SI). - * Returns 0 for success, error for failure. 
- */ -int cayman_dma_resume(struct radeon_device *rdev) -{ - struct radeon_ring *ring; - u32 rb_cntl, dma_cntl, ib_cntl; - u32 rb_bufsz; - u32 reg_offset, wb_offset; - int i, r; - - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - - for (i = 0; i < 2; i++) { - if (i == 0) { - ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; - reg_offset = DMA0_REGISTER_OFFSET; - wb_offset = R600_WB_DMA_RPTR_OFFSET; - } else { - ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; - reg_offset = DMA1_REGISTER_OFFSET; - wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; - } - - WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); - WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); - - /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); - rb_cntl = rb_bufsz << 1; -#ifdef __BIG_ENDIAN - rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; -#endif - WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(DMA_RB_RPTR + reg_offset, 0); - WREG32(DMA_RB_WPTR + reg_offset, 0); - - /* set the wb address whether it's enabled or not */ - WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, - upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); - WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, - ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); - - if (rdev->wb.enabled) - rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; - - WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); - - /* enable DMA IBs */ - ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; -#ifdef __BIG_ENDIAN - ib_cntl |= DMA_IB_SWAP_ENABLE; -#endif - WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); - - dma_cntl = RREG32(DMA_CNTL + reg_offset); - dma_cntl &= ~CTXEMPTY_INT_ENABLE; - WREG32(DMA_CNTL + reg_offset, dma_cntl); - - ring->wptr = 0; - WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); - - ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; - - WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); - - ring->ready = true; - - r = radeon_ring_test(rdev, ring->idx, ring); - if (r) { - ring->ready = false; - return r; - } - } - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - - return 0; -} - -/** - * cayman_dma_fini - tear down the async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the async dma engines and free the rings (cayman-SI). - */ -void cayman_dma_fini(struct radeon_device *rdev) -{ - cayman_dma_stop(rdev); - radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); - radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); -} - -static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) +u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -2027,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } -/** - * cayman_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. 
- */ -bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = cayman_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - static int cayman_startup(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; @@ -2658,61 +2456,7 @@ void cayman_vm_set_page(struct radeon_device *rdev, } } } else { - if ((flags & RADEON_VM_PAGE_SYSTEM) || - (count == 1)) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); + cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); } } @@ -2746,26 +2490,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); } - -void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) -{ - struct radeon_ring *ring = &rdev->ring[ridx]; - - if (vm == NULL) - return; - - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - - /* flush hdp cache */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); - radeon_ring_write(ring, 1); - - /* bits 0-7 are the VM contexts0-7 */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); - radeon_ring_write(ring, 1 << vm->id); -} - diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c new file mode 100644 index 000000000000..0f3c0baea4a6 --- 
/dev/null +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -0,0 +1,338 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "nid.h" + +u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev); + +/* + * DMA + * Starting with R600, the GPU has an asynchronous + * DMA engine. The programming model is very similar + * to the 3D engine (ring buffer, IBs, etc.), but the + * DMA controller has it's own packet format that is + * different form the PM4 format used by the 3D engine. + * It supports copying data, writing embedded data, + * solid fills, and a number of other things. It also + * has support for tiling/detiling of buffers. + * Cayman and newer support two asynchronous DMA engines. + */ + +/** + * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (cayman-SI). + */ +void cayman_dma_ring_ib_execute(struct radeon_device *rdev, + struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 4; + while ((next_rptr & 7) != 5) + next_rptr++; + next_rptr += 3; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); + radeon_ring_write(ring, next_rptr); + } + + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. + */ + while ((ring->wptr & 7) != 5) + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); + radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + +/** + * cayman_dma_stop - stop the async dma engines + * + * @rdev: radeon_device pointer + * + * Stop the async dma engines (cayman-SI). 
+ */ +void cayman_dma_stop(struct radeon_device *rdev) +{ + u32 rb_cntl; + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + + /* dma0 */ + rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); + rb_cntl &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); + + /* dma1 */ + rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); + rb_cntl &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); + + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; +} + +/** + * cayman_dma_resume - setup and start the async dma engines + * + * @rdev: radeon_device pointer + * + * Set up the DMA ring buffers and enable them. (cayman-SI). + * Returns 0 for success, error for failure. + */ +int cayman_dma_resume(struct radeon_device *rdev) +{ + struct radeon_ring *ring; + u32 rb_cntl, dma_cntl, ib_cntl; + u32 rb_bufsz; + u32 reg_offset, wb_offset; + int i, r; + + /* Reset dma */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + for (i = 0; i < 2; i++) { + if (i == 0) { + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + reg_offset = DMA0_REGISTER_OFFSET; + wb_offset = R600_WB_DMA_RPTR_OFFSET; + } else { + ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; + reg_offset = DMA1_REGISTER_OFFSET; + wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; + } + + WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); + WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = drm_order(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(DMA_RB_RPTR + reg_offset, 0); + WREG32(DMA_RB_WPTR + reg_offset, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, + upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); + WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, + ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); + + if (rdev->wb.enabled) + rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; + + WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); + + /* enable DMA IBs */ + ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; +#ifdef __BIG_ENDIAN + ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif + WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); + + dma_cntl = RREG32(DMA_CNTL + reg_offset); + dma_cntl &= ~CTXEMPTY_INT_ENABLE; + WREG32(DMA_CNTL + reg_offset, dma_cntl); + + ring->wptr = 0; + WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); + + ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; + + WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); + + ring->ready = true; + + r = radeon_ring_test(rdev, ring->idx, ring); + if (r) { + ring->ready = false; + return r; + } + } + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + + return 0; +} + +/** + * cayman_dma_fini - tear down the async dma engines + * + * @rdev: radeon_device pointer + * + * Stop the async dma engines and free the rings (cayman-SI). 
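/*
 * A rough sketch, not part of the patch: cayman_dma_stop()/cayman_dma_resume()
 * above drive both async DMA engines with a single code path by adding a
 * per-engine register offset to every MMIO address.  The offset values and
 * the register address below are assumptions for illustration; the write is
 * stubbed with a printf.
 */
#include <stdio.h>
#include <stdint.h>

#define DMA0_REGISTER_OFFSET	0x000	/* engine 0 bank (assumed) */
#define DMA1_REGISTER_OFFSET	0x800	/* engine 1 bank (assumed) */
#define DMA_RB_CNTL		0xd000	/* illustrative register address */

static void wreg32(uint32_t reg, uint32_t val)
{
	printf("WREG32(0x%05x, 0x%08x)\n", reg, val);	/* stands in for MMIO */
}

int main(void)
{
	static const uint32_t reg_offset[] = {
		DMA0_REGISTER_OFFSET,
		DMA1_REGISTER_OFFSET,
	};
	unsigned i;

	for (i = 0; i < 2; i++)
		wreg32(DMA_RB_CNTL + reg_offset[i], 0);	/* same write, per engine */
	return 0;
}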
+ */ +void cayman_dma_fini(struct radeon_device *rdev) +{ + cayman_dma_stop(rdev); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); + radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); +} + +/** + * cayman_dma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up. + * Returns true if the engine appears to be locked up, false if not. + */ +bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 reset_mask = cayman_gpu_check_soft_reset(rdev); + u32 mask; + + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + mask = RADEON_RESET_DMA; + else + mask = RADEON_RESET_DMA1; + + if (!(reset_mask & mask)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + +/** + * cayman_dma_vm_set_page - update the page tables using the DMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * @r600_flags: hw access flags + * + * Update the page tables using the DMA (cayman/TN). + */ +void cayman_dma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); + uint64_t value; + unsigned ndw; + + if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & RADEON_VM_PAGE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & RADEON_VM_PAGE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= r600_flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } + } else { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & RADEON_VM_PAGE_VALID) + value = addr; + else + value = 0; + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = r600_flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } + } + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); +} + +void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +{ + struct radeon_ring *ring = &rdev->ring[ridx]; + + if (vm == NULL) + return; + + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12); + + /* flush hdp cache */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); + radeon_ring_write(ring, 1); + + /* bits 0-7 are the VM contexts0-7 */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); + radeon_ring_write(ring, 1 << vm->id); +} + diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3a08ef92d33f..087cff444ba2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev) return true; } -static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) +u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } -/** - * r600_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. - */ -bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = r600_gpu_check_soft_reset(rdev); - - if (!(reset_mask & RADEON_RESET_DMA)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - u32 r6xx_remap_render_backend(struct radeon_device *rdev, u32 tiling_pipe_num, u32 max_rb_num, @@ -2493,176 +2471,6 @@ void r600_cp_fini(struct radeon_device *rdev) radeon_scratch_free(rdev, ring->rptr_save_reg); } -/* - * DMA - * Starting with R600, the GPU has an asynchronous - * DMA engine. The programming model is very similar - * to the 3D engine (ring buffer, IBs, etc.), but the - * DMA controller has it's own packet format that is - * different form the PM4 format used by the 3D engine. - * It supports copying data, writing embedded data, - * solid fills, and a number of other things. It also - * has support for tiling/detiling of buffers. - */ - -/** - * r600_dma_get_rptr - get the current read pointer - * - * @rdev: radeon_device pointer - * @ring: radeon ring pointer - * - * Get the current rptr from the hardware (r6xx+). - */ -uint32_t r600_dma_get_rptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2; -} - -/** - * r600_dma_get_wptr - get the current write pointer - * - * @rdev: radeon_device pointer - * @ring: radeon ring pointer - * - * Get the current wptr from the hardware (r6xx+). - */ -uint32_t r600_dma_get_wptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2; -} - -/** - * r600_dma_set_wptr - commit the write pointer - * - * @rdev: radeon_device pointer - * @ring: radeon ring pointer - * - * Write the wptr back to the hardware (r6xx+). - */ -void r600_dma_set_wptr(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc); -} - -/** - * r600_dma_stop - stop the async dma engine - * - * @rdev: radeon_device pointer - * - * Stop the async dma engine (r6xx-evergreen). 
- */ -void r600_dma_stop(struct radeon_device *rdev) -{ - u32 rb_cntl = RREG32(DMA_RB_CNTL); - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - - rb_cntl &= ~DMA_RB_ENABLE; - WREG32(DMA_RB_CNTL, rb_cntl); - - rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; -} - -/** - * r600_dma_resume - setup and start the async dma engine - * - * @rdev: radeon_device pointer - * - * Set up the DMA ring buffer and enable it. (r6xx-evergreen). - * Returns 0 for success, error for failure. - */ -int r600_dma_resume(struct radeon_device *rdev) -{ - struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; - u32 rb_cntl, dma_cntl, ib_cntl; - u32 rb_bufsz; - int r; - - /* Reset dma */ - if (rdev->family >= CHIP_RV770) - WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); - else - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - - WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); - WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); - - /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); - rb_cntl = rb_bufsz << 1; -#ifdef __BIG_ENDIAN - rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; -#endif - WREG32(DMA_RB_CNTL, rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(DMA_RB_RPTR, 0); - WREG32(DMA_RB_WPTR, 0); - - /* set the wb address whether it's enabled or not */ - WREG32(DMA_RB_RPTR_ADDR_HI, - upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); - WREG32(DMA_RB_RPTR_ADDR_LO, - ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); - - if (rdev->wb.enabled) - rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; - - WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); - - /* enable DMA IBs */ - ib_cntl = DMA_IB_ENABLE; -#ifdef __BIG_ENDIAN - ib_cntl |= DMA_IB_SWAP_ENABLE; -#endif - WREG32(DMA_IB_CNTL, ib_cntl); - - dma_cntl = RREG32(DMA_CNTL); - dma_cntl &= ~CTXEMPTY_INT_ENABLE; - WREG32(DMA_CNTL, dma_cntl); - - if (rdev->family >= CHIP_RV770) - WREG32(DMA_MODE, 1); - - ring->wptr = 0; - WREG32(DMA_RB_WPTR, ring->wptr << 2); - - ring->rptr = RREG32(DMA_RB_RPTR) >> 2; - - WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); - - ring->ready = true; - - r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); - if (r) { - ring->ready = false; - return r; - } - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - - return 0; -} - -/** - * r600_dma_fini - tear down the async dma engine - * - * @rdev: radeon_device pointer - * - * Stop the async dma engine and free the ring (r6xx-evergreen). - */ -void r600_dma_fini(struct radeon_device *rdev) -{ - r600_dma_stop(rdev); - radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); -} - /* * GPU scratch registers helpers function. */ @@ -2718,60 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } -/** - * r600_dma_ring_test - simple async dma engine test - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Test the DMA engine by writing using it to write an - * value to memory. (r6xx-SI). - * Returns 0 for success, error for failure. 
- */ -int r600_dma_ring_test(struct radeon_device *rdev, - struct radeon_ring *ring) -{ - unsigned i; - int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; - u32 tmp; - - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } - - tmp = 0xCAFEDEAD; - writel(tmp, ptr); - - r = radeon_ring_lock(rdev, ring, 4); - if (r) { - DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); - return r; - } - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); - radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); - - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - - if (i < rdev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - return r; -} - /* * CP fences/semaphores */ @@ -2839,59 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); } -/* - * DMA fences/semaphores - */ - -/** - * r600_dma_fence_ring_emit - emit a fence on the DMA ring - * - * @rdev: radeon_device pointer - * @fence: radeon fence object - * - * Add a DMA fence packet to the ring to write - * the fence seq number and DMA trap packet to generate - * an interrupt if needed (r6xx-r7xx). - */ -void r600_dma_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence) -{ - struct radeon_ring *ring = &rdev->ring[fence->ring]; - u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - - /* write the fence */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); - radeon_ring_write(ring, addr & 0xfffffffc); - radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); - radeon_ring_write(ring, lower_32_bits(fence->seq)); - /* generate an interrupt */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); -} - -/** - * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * @semaphore: radeon semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (r6xx-SI). - */ -void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 s = emit_wait ? 0 : 1; - - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); - radeon_ring_write(ring, addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(addr) & 0xff); -} - /** * r600_copy_cpdma - copy pages using the CP DMA engine * @@ -2976,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev, return r; } -/** - * r600_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (r6xx). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. 
- */ -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_dw, cur_size_in_dw; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; - num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); - r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_dw = size_in_dw; - if (cur_size_in_dw > 0xFFFE) - cur_size_in_dw = 0xFFFE; - size_in_dw -= cur_size_in_dw; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); - radeon_ring_write(ring, dst_offset & 0xfffffffc); - radeon_ring_write(ring, src_offset & 0xfffffffc); - radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | - (upper_32_bits(src_offset) & 0xff))); - src_offset += cur_size_in_dw * 4; - dst_offset += cur_size_in_dw * 4; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size) @@ -3409,104 +3036,6 @@ free_scratch: return r; } -/** - * r600_dma_ib_test - test an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Test a simple IB in the DMA ring (r6xx-SI). - * Returns 0 on success, error on failure. 
- */ -int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) -{ - struct radeon_ib ib; - unsigned i; - int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; - u32 tmp = 0; - - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } - - tmp = 0xCAFEDEAD; - writel(tmp, ptr); - - r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); - if (r) { - DRM_ERROR("radeon: failed to get ib (%d).\n", r); - return r; - } - - ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; - ib.ptr[3] = 0xDEADBEEF; - ib.length_dw = 4; - - r = radeon_ib_schedule(rdev, &ib, NULL); - if (r) { - radeon_ib_free(rdev, &ib); - DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); - return r; - } - r = radeon_fence_wait(ib.fence, false); - if (r) { - DRM_ERROR("radeon: fence wait failed (%d).\n", r); - return r; - } - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < rdev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); - } else { - DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); - r = -EINVAL; - } - radeon_ib_free(rdev, &ib); - return r; -} - -/** - * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * - * Schedule an IB in the DMA ring (r6xx-r7xx). - */ -void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - - if (rdev->wb.enabled) { - u32 next_rptr = ring->wptr + 4; - while ((next_rptr & 7) != 5) - next_rptr++; - next_rptr += 3; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); - radeon_ring_write(ring, next_rptr); - } - - /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. - * Pad as necessary with NOPs. - */ - while ((ring->wptr & 7) != 5) - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); - radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); - radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); - -} - /* * Interrupts * diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c new file mode 100644 index 000000000000..bff05576266b --- /dev/null +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -0,0 +1,497 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "r600d.h" + +u32 r600_gpu_check_soft_reset(struct radeon_device *rdev); + +/* + * DMA + * Starting with R600, the GPU has an asynchronous + * DMA engine. The programming model is very similar + * to the 3D engine (ring buffer, IBs, etc.), but the + * DMA controller has it's own packet format that is + * different form the PM4 format used by the 3D engine. + * It supports copying data, writing embedded data, + * solid fills, and a number of other things. It also + * has support for tiling/detiling of buffers. + */ + +/** + * r600_dma_get_rptr - get the current read pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Get the current rptr from the hardware (r6xx+). + */ +uint32_t r600_dma_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2; +} + +/** + * r600_dma_get_wptr - get the current write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Get the current wptr from the hardware (r6xx+). + */ +uint32_t r600_dma_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2; +} + +/** + * r600_dma_set_wptr - commit the write pointer + * + * @rdev: radeon_device pointer + * @ring: radeon ring pointer + * + * Write the wptr back to the hardware (r6xx+). + */ +void r600_dma_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc); +} + +/** + * r600_dma_stop - stop the async dma engine + * + * @rdev: radeon_device pointer + * + * Stop the async dma engine (r6xx-evergreen). + */ +void r600_dma_stop(struct radeon_device *rdev) +{ + u32 rb_cntl = RREG32(DMA_RB_CNTL); + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + + rb_cntl &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL, rb_cntl); + + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; +} + +/** + * r600_dma_resume - setup and start the async dma engine + * + * @rdev: radeon_device pointer + * + * Set up the DMA ring buffer and enable it. (r6xx-evergreen). + * Returns 0 for success, error for failure. 
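/*
 * A small sketch, not part of the patch: r600_dma_resume() below programs
 * DMA_RB_CNTL with the ring size encoded as a log2-of-dwords exponent
 * shifted up by one bit (mirroring "drm_order(ring->ring_size / 4) << 1").
 * The helper here reimplements that arithmetic for illustration; the field
 * position is inferred from the shift, not from register documentation.
 */
#include <stdio.h>

static unsigned order_of(unsigned x)	/* smallest n with (1 << n) >= x */
{
	unsigned n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

static unsigned dma_rb_cntl_size_bits(unsigned ring_size_bytes)
{
	unsigned rb_bufsz = order_of(ring_size_bytes / 4);	/* size in dwords */

	return rb_bufsz << 1;	/* size field sits just above the enable bit */
}

int main(void)
{
	/* a 64 KiB ring is 16384 dwords -> order 14 -> 0x1c in the register */
	printf("0x%x\n", dma_rb_cntl_size_bits(64 * 1024));
	return 0;
}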
+ */ +int r600_dma_resume(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + u32 rb_cntl, dma_cntl, ib_cntl; + u32 rb_bufsz; + int r; + + /* Reset dma */ + if (rdev->family >= CHIP_RV770) + WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); + else + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); + WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = drm_order(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(DMA_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(DMA_RB_RPTR, 0); + WREG32(DMA_RB_WPTR, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(DMA_RB_RPTR_ADDR_HI, + upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); + WREG32(DMA_RB_RPTR_ADDR_LO, + ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); + + if (rdev->wb.enabled) + rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; + + WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); + + /* enable DMA IBs */ + ib_cntl = DMA_IB_ENABLE; +#ifdef __BIG_ENDIAN + ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif + WREG32(DMA_IB_CNTL, ib_cntl); + + dma_cntl = RREG32(DMA_CNTL); + dma_cntl &= ~CTXEMPTY_INT_ENABLE; + WREG32(DMA_CNTL, dma_cntl); + + if (rdev->family >= CHIP_RV770) + WREG32(DMA_MODE, 1); + + ring->wptr = 0; + WREG32(DMA_RB_WPTR, ring->wptr << 2); + + ring->rptr = RREG32(DMA_RB_RPTR) >> 2; + + WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); + + ring->ready = true; + + r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); + if (r) { + ring->ready = false; + return r; + } + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + + return 0; +} + +/** + * r600_dma_fini - tear down the async dma engine + * + * @rdev: radeon_device pointer + * + * Stop the async dma engine and free the ring (r6xx-evergreen). + */ +void r600_dma_fini(struct radeon_device *rdev) +{ + r600_dma_stop(rdev); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); +} + +/** + * r600_dma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up. + * Returns true if the engine appears to be locked up, false if not. + */ +bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 reset_mask = r600_gpu_check_soft_reset(rdev); + + if (!(reset_mask & RADEON_RESET_DMA)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + + +/** + * r600_dma_ring_test - simple async dma engine test + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. (r6xx-SI). + * Returns 0 for success, error for failure. 
+ */ +int r600_dma_ring_test(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ring_lock(rdev, ring, 4); + if (r) { + DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); + return r; + } + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); + radeon_ring_write(ring, 0xDEADBEEF); + radeon_ring_unlock_commit(rdev, ring); + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < rdev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * r600_dma_fence_ring_emit - emit a fence on the DMA ring + * + * @rdev: radeon_device pointer + * @fence: radeon fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (r6xx-r7xx). + */ +void r600_dma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + + /* write the fence */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); + radeon_ring_write(ring, lower_32_bits(fence->seq)); + /* generate an interrupt */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); +} + +/** + * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @semaphore: radeon semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (r6xx-SI). + */ +void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 s = emit_wait ? 0 : 1; + + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(addr) & 0xff); +} + +/** + * r600_dma_ib_test - test an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test a simple IB in the DMA ring (r6xx-SI). + * Returns 0 on success, error on failure. 
+ */ +int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_ib ib; + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp = 0; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); + ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; + ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[3] = 0xDEADBEEF; + ib.length_dw = 4; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + radeon_ib_free(rdev, &ib); + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + return r; + } + r = radeon_fence_wait(ib.fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < rdev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); + } else { + DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + radeon_ib_free(rdev, &ib); + return r; +} + +/** + * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (r6xx-r7xx). + */ +void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 4; + while ((next_rptr & 7) != 5) + next_rptr++; + next_rptr += 3; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); + radeon_ring_write(ring, next_rptr); + } + + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. + */ + while ((ring->wptr & 7) != 5) + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); + radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + +/** + * r600_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (r6xx). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. 
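/*
 * A standalone sketch, not the driver helper: r600_copy_dma() below splits a
 * page-sized transfer into COPY packets of at most 0xFFFE dwords and reserves
 * ring space up front (4 dwords per packet, plus 8 more that cover the
 * optional semaphore sync and the fence/trap emitted afterwards).  Macro
 * names here are illustrative stand-ins.
 */
#include <stdio.h>

#define GPU_PAGE_SHIFT		12	/* stand-in for RADEON_GPU_PAGE_SHIFT */
#define R600_DMA_COPY_MAX_DW	0xFFFE	/* per-packet limit used below */

static unsigned r600_copy_ring_dwords(unsigned num_gpu_pages)
{
	unsigned size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned num_loops =
		(size_in_dw + R600_DMA_COPY_MAX_DW - 1) / R600_DMA_COPY_MAX_DW;

	return num_loops * 4 + 8;
}

int main(void)
{
	/* one 4 KiB page is 1024 dwords: a single loop, 12 ring dwords */
	printf("%u\n", r600_copy_ring_dwords(1));
	return 0;
}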
+ */ +int r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_dw, cur_size_in_dw; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; + num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); + r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_dw = size_in_dw; + if (cur_size_in_dw > 0xFFFE) + cur_size_in_dw = 0xFFFE; + size_in_dw -= cur_size_in_dw; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, src_offset & 0xfffffffc); + radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | + (upper_32_bits(src_offset) & 0xff))); + src_offset += cur_size_in_dw * 4; + dst_offset += cur_size_in_dw * 4; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index aaab7b1bba27..b811296462a3 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1650,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev) return 0; } -/** - * rv770_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (r7xx). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. 
- */ -int rv770_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_dw, cur_size_in_dw; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; - num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_dw = size_in_dw; - if (cur_size_in_dw > 0xFFFF) - cur_size_in_dw = 0xFFFF; - size_in_dw -= cur_size_in_dw; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); - radeon_ring_write(ring, dst_offset & 0xfffffffc); - radeon_ring_write(ring, src_offset & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); - src_offset += cur_size_in_dw * 4; - dst_offset += cur_size_in_dw * 4; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - static int rv770_startup(struct radeon_device *rdev) { struct radeon_ring *ring; diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c new file mode 100644 index 000000000000..f9b02e3d6830 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -0,0 +1,101 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "rv770d.h" + +/** + * rv770_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (r7xx). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. + */ +int rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_dw, cur_size_in_dw; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; + num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF); + r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_dw = size_in_dw; + if (cur_size_in_dw > 0xFFFF) + cur_size_in_dw = 0xFFFF; + size_in_dw -= cur_size_in_dw; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, src_offset & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + src_offset += cur_size_in_dw * 4; + dst_offset += cur_size_in_dw * 4; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f3f79089405e..f5307e6bb92b 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -78,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); extern bool evergreen_is_display_hung(struct radeon_device *rdev); +extern void si_dma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); static const u32 verde_rlc_save_restore_register_list[] = { @@ -3495,7 +3500,7 @@ static int si_cp_resume(struct radeon_device *rdev) return 0; } -static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) +u32 si_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -3744,34 +3749,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } -/** - * si_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the 
async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. - */ -bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = si_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - /* MC */ static void si_mc_program(struct radeon_device *rdev) { @@ -4710,58 +4687,7 @@ void si_vm_set_page(struct radeon_device *rdev, } } else { /* DMA */ - if (flags & RADEON_VM_PAGE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); + si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); } } @@ -4808,32 +4734,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) radeon_ring_write(ring, 0x0); } -void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) -{ - struct radeon_ring *ring = &rdev->ring[ridx]; - - if (vm == NULL) - return; - - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - if (vm->id < 8) { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); - } else { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); - } - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - - /* flush hdp cache */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); - radeon_ring_write(ring, 1); - - /* bits 0-7 are the VM contexts0-7 */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); - radeon_ring_write(ring, 1 << vm->id); -} - /* * Power and clock gating */ @@ -6177,80 +6077,6 @@ restart_ih: return IRQ_HANDLED; } -/** - * si_copy_dma - copy pages using the DMA 
engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (SI). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. - */ -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_bytes, cur_size_in_bytes; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); - num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_bytes = size_in_bytes; - if (cur_size_in_bytes > 0xFFFFF) - cur_size_in_bytes = 0xFFFFF; - size_in_bytes -= cur_size_in_bytes; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); - radeon_ring_write(ring, dst_offset & 0xffffffff); - radeon_ring_write(ring, src_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); - src_offset += cur_size_in_bytes; - dst_offset += cur_size_in_bytes; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - /* * startup/shutdown callbacks */ diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c new file mode 100644 index 000000000000..49909d23dfce --- /dev/null +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -0,0 +1,235 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#include +#include "radeon.h" +#include "radeon_asic.h" +#include "sid.h" + +u32 si_gpu_check_soft_reset(struct radeon_device *rdev); + +/** + * si_dma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up. + * Returns true if the engine appears to be locked up, false if not. + */ +bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 reset_mask = si_gpu_check_soft_reset(rdev); + u32 mask; + + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + mask = RADEON_RESET_DMA; + else + mask = RADEON_RESET_DMA1; + + if (!(reset_mask & mask)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + +/** + * si_dma_vm_set_page - update the page tables using the DMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using the DMA (SI). + */ +void si_dma_vm_set_page(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); + uint64_t value; + unsigned ndw; + + if (flags & RADEON_VM_PAGE_SYSTEM) { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & RADEON_VM_PAGE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & RADEON_VM_PAGE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= r600_flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } + } else { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & RADEON_VM_PAGE_VALID) + value = addr; + else + value = 0; + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = r600_flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } + } + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); +} + +void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +{ + struct radeon_ring *ring = &rdev->ring[ridx]; + + if (vm == NULL) + return; + + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + if (vm->id < 8) { + radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); + } else { + radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) 
<< 2)) >> 2)); + } + radeon_ring_write(ring, vm->pd_gpu_addr >> 12); + + /* flush hdp cache */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); + radeon_ring_write(ring, 1); + + /* bits 0-7 are the VM contexts0-7 */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); + radeon_ring_write(ring, 1 << vm->id); +} + +/** + * si_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (SI). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. + */ +int si_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_bytes, cur_size_in_bytes; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); + num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); + r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_bytes = size_in_bytes; + if (cur_size_in_bytes > 0xFFFFF) + cur_size_in_bytes = 0xFFFFF; + size_in_bytes -= cur_size_in_bytes; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); + radeon_ring_write(ring, dst_offset & 0xffffffff); + radeon_ring_write(ring, src_offset & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + src_offset += cur_size_in_bytes; + dst_offset += cur_size_in_bytes; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + -- cgit v1.2.3 From d105f4768a959f22078820625ccec9b693f164c8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2013 15:55:32 -0400 Subject: drm/edid: add a helper function to extract the speaker allocation data block (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a helper function to extract the speaker allocation data block from the EDID. This data block describes what speakers are present on the display device. 
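As a usage sketch only (not part of this patch), a caller that already holds a parsed struct edid would consume the helper roughly as follows; the hardware write is a hypothetical placeholder, and the buffer returned through sadb is kmalloc'd by the helper, so the caller owns it and must kfree it:

	u8 *sadb = NULL;
	int count;

	count = drm_edid_to_speaker_allocation(edid, &sadb);
	if (count <= 0) {
		DRM_DEBUG_KMS("no Speaker Allocation Data Block (%d)\n", count);
		return;
	}
	/* sadb[0] carries the CEA-861 speaker presence bits */
	my_write_speaker_allocation_hw(sadb[0]);	/* hypothetical hw hook */
	kfree(sadb);
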
v2: update per Ville Syrjälä's comments v3: fix copy/paste typo in memory allocation Signed-off-by: Alex Deucher Reviewed-by: Ville Syrjälä Tested-by: Rafał Miłecki --- drivers/gpu/drm/drm_edid.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_edid.h | 1 + 2 files changed, 53 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 70fc1335e331..58b4882feedf 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2734,6 +2734,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) } EXPORT_SYMBOL(drm_edid_to_sad); +/** + * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID + * @edid: EDID to parse + * @sadb: pointer to the speaker block + * + * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it. + * Note: returned pointer needs to be kfreed + * + * Return number of found Speaker Allocation Blocks or negative number on error. + */ +int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) +{ + int count = 0; + int i, start, end, dbl; + const u8 *cea; + + cea = drm_find_cea_extension(edid); + if (!cea) { + DRM_DEBUG_KMS("SAD: no CEA Extension found\n"); + return -ENOENT; + } + + if (cea_revision(cea) < 3) { + DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); + return -ENOTSUPP; + } + + if (cea_db_offsets(cea, &start, &end)) { + DRM_DEBUG_KMS("SAD: invalid data block offsets\n"); + return -EPROTO; + } + + for_each_cea_db(cea, i, start, end) { + const u8 *db = &cea[i]; + + if (cea_db_tag(db) == SPEAKER_BLOCK) { + dbl = cea_db_payload_len(db); + + /* Speaker Allocation Data Block */ + if (dbl == 3) { + *sadb = kmalloc(dbl, GFP_KERNEL); + memcpy(*sadb, &db[1], dbl); + count = dbl; + break; + } + } + } + + return count; +} +EXPORT_SYMBOL(drm_edid_to_speaker_allocation); + /** * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond * @connector: connector associated with the HDMI/DP sink diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index fc481fc17085..c76a129b9953 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -259,6 +259,7 @@ struct hdmi_avi_infoframe; void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); +int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_connector *drm_select_eld(struct drm_encoder *encoder, -- cgit v1.2.3 From a4d39e68949f5b4f7b426be63782b421018f741a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 1 Aug 2013 17:29:16 +0200 Subject: drm/radeon: use loop for initializing AFMT blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_display.c | 53 ++++++++++++++------------------- 1 file changed, 23 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c2b67b4e1ac2..31d9fbe85c72 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1257,38 +1257,31 @@ static void radeon_afmt_init(struct radeon_device *rdev) if (ASIC_IS_DCE6(rdev)) { /* todo */ } else if (ASIC_IS_DCE4(rdev)) { + static uint32_t eg_offsets[] = { + EVERGREEN_CRTC0_REGISTER_OFFSET, + EVERGREEN_CRTC1_REGISTER_OFFSET, + 
EVERGREEN_CRTC2_REGISTER_OFFSET, + EVERGREEN_CRTC3_REGISTER_OFFSET, + EVERGREEN_CRTC4_REGISTER_OFFSET, + EVERGREEN_CRTC5_REGISTER_OFFSET, + }; + int num_afmt; + /* DCE4/5 has 6 audio blocks tied to DIG encoders */ /* DCE4.1 has 2 audio blocks tied to DIG encoders */ - rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[0]) { - rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET; - rdev->mode_info.afmt[0]->id = 0; - } - rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[1]) { - rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET; - rdev->mode_info.afmt[1]->id = 1; - } - if (!ASIC_IS_DCE41(rdev)) { - rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[2]) { - rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET; - rdev->mode_info.afmt[2]->id = 2; - } - rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[3]) { - rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET; - rdev->mode_info.afmt[3]->id = 3; - } - rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[4]) { - rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET; - rdev->mode_info.afmt[4]->id = 4; - } - rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); - if (rdev->mode_info.afmt[5]) { - rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET; - rdev->mode_info.afmt[5]->id = 5; + if (ASIC_IS_DCE5(rdev)) + num_afmt = 6; + else if (ASIC_IS_DCE41(rdev)) + num_afmt = 2; + else /* DCE4 */ + num_afmt = 6; + + BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets)); + for (i = 0; i < num_afmt; i++) { + rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); + if (rdev->mode_info.afmt[i]) { + rdev->mode_info.afmt[i]->offset = eg_offsets[i]; + rdev->mode_info.afmt[i]->id = i; } } } else if (ASIC_IS_DCE3(rdev)) { -- cgit v1.2.3 From b530602fd4625f763344e455902981b22f85f609 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2013 16:51:33 -0400 Subject: drm/radeon: add audio support for DCE6/8 GPUs (v12) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar to DCE4/5, but supports multiple audio pins which can be assigned per afmt block. v2: rework the driver to handle more than one audio pin. 
v3: try different dto reg v4: properly program dto v5 (ck): change dto programming order v6: program speaker allocation block v7: rebase v8: rebase on Rafał's changes v9: integrated Rafał's comments, update to latest drm_edid_to_speaker_allocation API v10: add missing line break in error message v11: add back audio enabled messages v12: fix copy paste typo in r600_audio_enable Signed-off-by: Alex Deucher Signed-off-by: Christian König Acked-by: Rafał Miłecki --- drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/atombios_encoders.c | 11 +- drivers/gpu/drm/radeon/cik.c | 5 + drivers/gpu/drm/radeon/dce6_afmt.c | 251 +++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/evergreen_hdmi.c | 54 +++++-- drivers/gpu/drm/radeon/ni.c | 17 +- drivers/gpu/drm/radeon/r600_audio.c | 60 ++++--- drivers/gpu/drm/radeon/r600_hdmi.c | 7 +- drivers/gpu/drm/radeon/radeon.h | 18 ++- drivers/gpu/drm/radeon/radeon_asic.c | 8 + drivers/gpu/drm/radeon/radeon_asic.h | 4 +- drivers/gpu/drm/radeon/radeon_display.c | 13 +- drivers/gpu/drm/radeon/radeon_mode.h | 3 +- drivers/gpu/drm/radeon/si.c | 5 + drivers/gpu/drm/radeon/sid.h | 59 +++++++ 15 files changed, 455 insertions(+), 62 deletions(-) create mode 100644 drivers/gpu/drm/radeon/dce6_afmt.c diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index da2a8e9e9308..306364a1ecda 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o + ci_dpm.o dce6_afmt.o # add async DMA block radeon-y += \ diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 092275d53d4a..dfac7965ea28 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) int atombios_get_encoder_mode(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector; struct radeon_connector *radeon_connector; @@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; @@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_HDMIA: default: if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; @@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; else if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return 
ATOM_ENCODER_MODE_DVI; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 692e31b95d34..2b6049d55233 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7004,6 +7004,10 @@ static int cik_startup(struct radeon_device *rdev) return r; } + r = dce6_audio_init(rdev); + if (r) + return r; + return 0; } @@ -7049,6 +7053,7 @@ int cik_resume(struct radeon_device *rdev) */ int cik_suspend(struct radeon_device *rdev) { + dce6_audio_fini(rdev); radeon_vm_manager_fini(rdev); cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c new file mode 100644 index 000000000000..0d9a6a21088c --- /dev/null +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -0,0 +1,251 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include "radeon.h" +#include "sid.h" + +static u32 dce6_endpoint_rreg(struct radeon_device *rdev, + u32 block_offset, u32 reg) +{ + u32 r; + + WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); + return r; +} + +static void dce6_endpoint_wreg(struct radeon_device *rdev, + u32 block_offset, u32 reg, u32 v) +{ + if (ASIC_IS_DCE8(rdev)) + WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + else + WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, + AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); + WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); +} + +#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) +#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v)) + + +static void dce6_afmt_get_connected_pins(struct radeon_device *rdev) +{ + int i; + u32 offset, tmp; + + for (i = 0; i < rdev->audio.num_pins; i++) { + offset = rdev->audio.pin[i].offset; + tmp = RREG32_ENDPOINT(offset, + AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); + if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1) + rdev->audio.pin[i].connected = false; + else + rdev->audio.pin[i].connected = true; + } +} + +struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev) +{ + int i; + + dce6_afmt_get_connected_pins(rdev); + + for (i = 0; i < rdev->audio.num_pins; i++) { + if (rdev->audio.pin[i].connected) + return &rdev->audio.pin[i]; + } + DRM_ERROR("No connected audio pins found!\n"); + return NULL; +} + +void dce6_afmt_select_pin(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + u32 offset = dig->afmt->offset; + u32 id = dig->afmt->pin->id; + + if (!dig->afmt->pin) + return; + + WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); +} + +void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + u32 offset, tmp; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + struct cea_sad *sads; + int i, sad_count, sadb_count; + u8 *sadb; + + static const u16 eld_reg_to_type[][2] = { + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + if (!dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + + list_for_each_entry(connector, 
&encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + radeon_connector = to_radeon_connector(connector); + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); + if (sad_count < 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + sadb_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + if (sadb_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); + return; + } + + /* program the speaker allocation */ + tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= HDMI_CONNECTION; + if (sadb_count) + tmp |= SPEAKER_ALLOCATION(sadb[0]); + else + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ + WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 value = 0; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + value = MAX_CHANNELS(sad->channels) | + DESCRIPTOR_BYTE_2(sad->byte2) | + SUPPORTED_FREQUENCIES(sad->freq); + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq); + break; + } + } + WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); + } + + kfree(sads); + kfree(sadb); +} + +static int dce6_audio_chipset_supported(struct radeon_device *rdev) +{ + return !ASIC_IS_NODCE(rdev); +} + +static void dce6_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) +{ + WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, + AUDIO_ENABLED); + DRM_INFO("%s audio %d support\n", enable ? 
"Enabling" : "Disabling", pin->id); +} + +static const u32 pin_offsets[7] = +{ + (0x5e00 - 0x5e00), + (0x5e18 - 0x5e00), + (0x5e30 - 0x5e00), + (0x5e48 - 0x5e00), + (0x5e60 - 0x5e00), + (0x5e78 - 0x5e00), + (0x5e90 - 0x5e00), +}; + +int dce6_audio_init(struct radeon_device *rdev) +{ + int i; + + if (!radeon_audio || !dce6_audio_chipset_supported(rdev)) + return 0; + + rdev->audio.enabled = true; + + if (ASIC_IS_DCE8(rdev)) + rdev->audio.num_pins = 7; + else + rdev->audio.num_pins = 6; + + for (i = 0; i < rdev->audio.num_pins; i++) { + rdev->audio.pin[i].channels = -1; + rdev->audio.pin[i].rate = -1; + rdev->audio.pin[i].bits_per_sample = -1; + rdev->audio.pin[i].status_bits = 0; + rdev->audio.pin[i].category_code = 0; + rdev->audio.pin[i].connected = false; + rdev->audio.pin[i].offset = pin_offsets[i]; + rdev->audio.pin[i].id = i; + dce6_audio_enable(rdev, &rdev->audio.pin[i], true); + } + + return 0; +} + +void dce6_audio_fini(struct radeon_device *rdev) +{ + int i; + + if (!rdev->audio.enabled) + return; + + for (i = 0; i < rdev->audio.num_pins; i++) + dce6_audio_enable(rdev, &rdev->audio.pin[i], false); + + rdev->audio.enabled = false; +} diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b0e280058b9b..c5acdf0a301a 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -32,6 +32,9 @@ #include "evergreend.h" #include "atom.h" +extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder); +extern void dce6_afmt_select_pin(struct drm_encoder *encoder); + /* * update the N and CTS parameters for a given pixel clock rate */ @@ -157,22 +160,26 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) if (!dig || !dig->afmt) return; - if (max_ratio >= 8) { - dto_phase = 192 * 1000; - wallclock_ratio = 3; - } else if (max_ratio >= 4) { - dto_phase = 96 * 1000; - wallclock_ratio = 2; - } else if (max_ratio >= 2) { - dto_phase = 48 * 1000; - wallclock_ratio = 1; - } else { + if (ASIC_IS_DCE6(rdev)) { dto_phase = 24 * 1000; - wallclock_ratio = 0; + } else { + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); } - dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; - dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); - WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational @@ -266,7 +273,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode AFMT_AUDIO_CHANNEL_ENABLE(0xff)); /* fglrx sets 0x40 in 0x5f80 here */ - evergreen_hdmi_write_sad_regs(encoder); + + if (ASIC_IS_DCE6(rdev)) { + dce6_afmt_select_pin(encoder); + dce6_afmt_write_sad_regs(encoder); + } else { + evergreen_hdmi_write_sad_regs(encoder); + } err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { @@ -302,6 +315,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) { + struct drm_device *dev = encoder->dev; + 
struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -314,6 +329,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; + if (enable) { + if (ASIC_IS_DCE6(rdev)) + dig->afmt->pin = dce6_audio_get_pin(rdev); + else + dig->afmt->pin = r600_audio_get_pin(rdev); + } else { + dig->afmt->pin = NULL; + } + dig->afmt->enabled = enable; DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 2db8ce0023ac..69499fff06b0 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2027,9 +2027,15 @@ static int cayman_startup(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) - return r; + if (ASIC_IS_DCE6(rdev)) { + r = dce6_audio_init(rdev); + if (r) + return r; + } else { + r = r600_audio_init(rdev); + if (r) + return r; + } return 0; } @@ -2060,7 +2066,10 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { - r600_audio_fini(rdev); + if (ASIC_IS_DCE6(rdev)) + dce6_audio_fini(rdev); + else + r600_audio_fini(rdev); radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index c92eb86a8e55..47fc2b886979 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder) */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev); + return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev); } -struct r600_audio r600_audio_status(struct radeon_device *rdev) +struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) { - struct r600_audio status; + struct r600_audio_pin status; uint32_t value; value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); @@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work) struct radeon_device *rdev = container_of(work, struct radeon_device, audio_work); struct drm_device *dev = rdev->ddev; - struct r600_audio audio_status = r600_audio_status(rdev); + struct r600_audio_pin audio_status = r600_audio_status(rdev); struct drm_encoder *encoder; bool changed = false; - if (rdev->audio_status.channels != audio_status.channels || - rdev->audio_status.rate != audio_status.rate || - rdev->audio_status.bits_per_sample != audio_status.bits_per_sample || - rdev->audio_status.status_bits != audio_status.status_bits || - rdev->audio_status.category_code != audio_status.category_code) { - rdev->audio_status = audio_status; + if (rdev->audio.pin[0].channels != audio_status.channels || + rdev->audio.pin[0].rate != audio_status.rate || + rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample || + rdev->audio.pin[0].status_bits != audio_status.status_bits || + rdev->audio.pin[0].category_code != audio_status.category_code) { + rdev->audio.pin[0] = audio_status; changed = true; } @@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work) } } -/* - * turn on/off audio engine - */ -static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) +/* enable the audio stream */ +static void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) { u32 value = 0; - 
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling"); + if (ASIC_IS_DCE4(rdev)) { if (enable) { value |= 0x81000000; /* Required to enable audio */ @@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000); } - rdev->audio_enabled = enable; + DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); } /* @@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev) if (!radeon_audio || !r600_audio_chipset_supported(rdev)) return 0; - r600_audio_engine_enable(rdev, true); + rdev->audio.enabled = true; + + rdev->audio.num_pins = 1; + rdev->audio.pin[0].channels = -1; + rdev->audio.pin[0].rate = -1; + rdev->audio.pin[0].bits_per_sample = -1; + rdev->audio.pin[0].status_bits = 0; + rdev->audio.pin[0].category_code = 0; + rdev->audio.pin[0].id = 0; - rdev->audio_status.channels = -1; - rdev->audio_status.rate = -1; - rdev->audio_status.bits_per_sample = -1; - rdev->audio_status.status_bits = 0; - rdev->audio_status.category_code = 0; + r600_audio_enable(rdev, &rdev->audio.pin[0], true); return 0; } @@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev) */ void r600_audio_fini(struct radeon_device *rdev) { - if (!rdev->audio_enabled) + if (!rdev->audio.enabled) return; - r600_audio_engine_enable(rdev, false); + r600_audio_enable(rdev, &rdev->audio.pin[0], false); + + rdev->audio.enabled = false; +} + +struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev) +{ + /* only one pin on 6xx-NI */ + return &rdev->audio.pin[0]; } diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f264df5470f7..e1dec1339461 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -382,7 +382,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct r600_audio audio = r600_audio_status(rdev); + struct r600_audio_pin audio = r600_audio_status(rdev); uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; struct hdmi_audio_infoframe frame; uint32_t offset; @@ -491,6 +491,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; + if (enable) + dig->afmt->pin = r600_audio_get_pin(rdev); + else + dig->afmt->pin = NULL; + /* Older chipsets require setting HDMI and routing manually */ if (!ASIC_IS_DCE3(rdev)) { if (enable) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 791cc8de6395..82fef854b686 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -696,7 +696,7 @@ union radeon_irq_stat_regs { #define RADEON_MAX_HPD_PINS 6 #define RADEON_MAX_CRTCS 6 -#define RADEON_MAX_AFMT_BLOCKS 6 +#define RADEON_MAX_AFMT_BLOCKS 7 struct radeon_irq { bool installed; @@ -1537,12 +1537,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, unsigned cg_upll_func_cntl); -struct r600_audio { +struct r600_audio_pin { int channels; int rate; int bits_per_sample; u8 status_bits; u8 category_code; + u32 offset; + bool connected; + u32 id; +}; + +struct r600_audio { + bool enabled; + struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS]; + int num_pins; }; /* @@ -2128,9 +2137,8 @@ struct radeon_device 
{ struct work_struct reset_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ - bool audio_enabled; bool has_uvd; - struct r600_audio audio_status; /* audio stuff */ + struct r600_audio audio; /* audio stuff */ struct notifier_block acpi_nb; /* only one userspace can use Hyperz features or CMASK at a time */ struct drm_file *hyperz_filp; @@ -2594,6 +2602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, /* audio */ void r600_audio_update_hdmi(struct work_struct *work); +struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); +struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); /* * R600 vram scratch functions diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index da755bf37421..69f3122779c3 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1739,6 +1739,8 @@ static struct radeon_asic trinity_asic = { .wait_for_vblank = &dce4_wait_for_vblank, .set_backlight_level = &atombios_set_backlight_level, .get_backlight_level = &atombios_get_backlight_level, + .hdmi_enable = &evergreen_hdmi_enable, + .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { .blit = &r600_copy_cpdma, @@ -1867,6 +1869,8 @@ static struct radeon_asic si_asic = { .wait_for_vblank = &dce4_wait_for_vblank, .set_backlight_level = &atombios_set_backlight_level, .get_backlight_level = &atombios_get_backlight_level, + .hdmi_enable = &evergreen_hdmi_enable, + .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { .blit = NULL, @@ -2009,6 +2013,8 @@ static struct radeon_asic ci_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .hdmi_enable = &evergreen_hdmi_enable, + .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { .blit = NULL, @@ -2107,6 +2113,8 @@ static struct radeon_asic kv_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .hdmi_enable = &evergreen_hdmi_enable, + .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { .blit = NULL, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e69f00a7f153..818bbe6b884b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -379,7 +379,7 @@ void r600_disable_interrupts(struct radeon_device *rdev); void r600_rlc_stop(struct radeon_device *rdev); /* r600 audio */ int r600_audio_init(struct radeon_device *rdev); -struct r600_audio r600_audio_status(struct radeon_device *rdev); +struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); void r600_audio_fini(struct radeon_device *rdev); int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); @@ -628,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev, /* DCE6 - SI */ void dce6_bandwidth_update(struct radeon_device *rdev); +int dce6_audio_init(struct radeon_device *rdev); +void dce6_audio_fini(struct radeon_device *rdev); /* * si diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 31d9fbe85c72..af9cd6a57efc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1254,8 +1254,8 @@ static void radeon_afmt_init(struct radeon_device *rdev) for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) 
rdev->mode_info.afmt[i] = NULL; - if (ASIC_IS_DCE6(rdev)) { - /* todo */ + if (ASIC_IS_NODCE(rdev)) { + /* nothing to do */ } else if (ASIC_IS_DCE4(rdev)) { static uint32_t eg_offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET, @@ -1264,12 +1264,19 @@ static void radeon_afmt_init(struct radeon_device *rdev) EVERGREEN_CRTC3_REGISTER_OFFSET, EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET, + 0x13830 - 0x7030, }; int num_afmt; + /* DCE8 has 7 audio blocks tied to DIG encoders */ + /* DCE6 has 6 audio blocks tied to DIG encoders */ /* DCE4/5 has 6 audio blocks tied to DIG encoders */ /* DCE4.1 has 2 audio blocks tied to DIG encoders */ - if (ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE8(rdev)) + num_afmt = 7; + else if (ASIC_IS_DCE6(rdev)) + num_afmt = 6; + else if (ASIC_IS_DCE5(rdev)) num_afmt = 6; else if (ASIC_IS_DCE41(rdev)) num_afmt = 2; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8296632a4235..d908d8d68f6b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -225,6 +225,7 @@ struct radeon_afmt { int offset; bool last_buffer_filled_status; int id; + struct r600_audio_pin *pin; }; struct radeon_mode_info { @@ -233,7 +234,7 @@ struct radeon_mode_info { enum radeon_connector_table connector_table; bool mode_config_initialized; struct radeon_crtc *crtcs[6]; - struct radeon_afmt *afmt[6]; + struct radeon_afmt *afmt[7]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f5307e6bb92b..fb2058c9670d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6264,6 +6264,10 @@ static int si_startup(struct radeon_device *rdev) return r; } + r = dce6_audio_init(rdev); + if (r) + return r; + return 0; } @@ -6295,6 +6299,7 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { + dce6_audio_fini(rdev); radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 2c8da27a929f..968cf699c29e 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -635,6 +635,54 @@ #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 +/* DCE6 ELD audio interface */ +#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00 +# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0) +# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8) +#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04 + +#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25 +#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0) +#define SPEAKER_ALLOCATION_MASK (0x7f << 0) +#define SPEAKER_ALLOCATION_SHIFT 0 +#define HDMI_CONNECTION (1 << 16) +#define DP_CONNECTION (1 << 17) + +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */ +#define 
AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */ +#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */ +# define MAX_CHANNELS(x) (((x) & 0x7) << 0) +/* max channels minus one. 7 = 8 channels */ +# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) +# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) +# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ +/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO + * bit0 = 32 kHz + * bit1 = 44.1 kHz + * bit2 = 48 kHz + * bit3 = 88.2 kHz + * bit4 = 96 kHz + * bit5 = 176.4 kHz + * bit6 = 192 kHz + */ +#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 +# define AUDIO_ENABLED (1 << 31) + +#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56 +#define PORT_CONNECTIVITY_MASK (3 << 30) +#define PORT_CONNECTIVITY_SHIFT 30 + #define DC_LB_MEMORY_SPLIT 0x6b0c #define DC_LB_MEMORY_CONFIG(x) ((x) << 20) @@ -755,6 +803,17 @@ /* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ #define CRTC_STATUS_FRAME_COUNT 0x6e98 +#define AFMT_AUDIO_SRC_CONTROL 0x713c +#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) +/* AFMT_AUDIO_SRC_SELECT + * 0 = stream0 + * 1 = stream1 + * 2 = stream2 + * 3 = stream3 + * 4 = stream4 + * 5 = stream5 + */ + #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) -- cgit v1.2.3 From 6159b65a5f4e04773e62e57a785df2452ddde1bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 15 Aug 2013 11:16:30 +0200 Subject: drm/radeon: set speakers allocation earlier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do it before enabling audio channels (in AFMT_AUDIO_PACKET_CONTROL2 register). 
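For reference, a simplified sketch (not a verbatim excerpt) of the resulting DCE6 ordering in evergreen_hdmi_setmode() after this change, with the speaker allocation programmed before the audio channels are enabled:

	/* simplified DCE6 flow after this patch */
	dce6_afmt_write_speaker_allocation(encoder);	/* speaker bits first */
	WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
	       AFMT_AUDIO_CHANNEL_ENABLE(0xff));	/* then enable the channels */
	dce6_afmt_select_pin(encoder);
	dce6_afmt_write_sad_regs(encoder);
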
Signed-off-by: Rafał Miłecki Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/dce6_afmt.c | 69 +++++++++++++++++++++++---------- drivers/gpu/drm/radeon/evergreen_hdmi.c | 7 +++- 2 files changed, 54 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 0d9a6a21088c..8953255e894b 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -94,17 +94,62 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); } -void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) +void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; u32 offset, tmp; + u8 *sadb; + int sad_count; + + if (!dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + radeon_connector = to_radeon_connector(connector); + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + return; + } + + /* program the speaker allocation */ + tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= HDMI_CONNECTION; + if (sad_count) + tmp |= SPEAKER_ALLOCATION(sadb[0]); + else + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ + WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + +void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + u32 offset; struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; struct cea_sad *sads; - int i, sad_count, sadb_count; - u8 *sadb; + int i, sad_count; static const u16 eld_reg_to_type[][2] = { { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, @@ -143,23 +188,6 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) } BUG_ON(!sads); - sadb_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sadb_count < 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); - return; - } - - /* program the speaker allocation */ - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); - tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); - /* set HDMI mode */ - tmp |= HDMI_CONNECTION; - if (sadb_count) - tmp |= SPEAKER_ALLOCATION(sadb[0]); - else - tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 value = 0; int j; @@ -180,7 +208,6 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) } kfree(sads); - kfree(sadb); } static int dce6_audio_chipset_supported(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c 
b/drivers/gpu/drm/radeon/evergreen_hdmi.c index c5acdf0a301a..2cb0f90126cb 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -32,6 +32,7 @@ #include "evergreend.h" #include "atom.h" +extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder); extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder); extern void dce6_afmt_select_pin(struct drm_encoder *encoder); @@ -267,7 +268,11 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode AFMT_60958_CS_CHANNEL_NUMBER_6(7) | AFMT_60958_CS_CHANNEL_NUMBER_7(8)); - /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */ + if (ASIC_IS_DCE6(rdev)) { + dce6_afmt_write_speaker_allocation(encoder); + } else { + /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */ + } WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, AFMT_AUDIO_CHANNEL_ENABLE(0xff)); -- cgit v1.2.3 From ba7def4fac1d897198949cdf9a7cf15916bcf032 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Aug 2013 09:34:07 -0400 Subject: drm/radeon: set speaker allocation for DCE4/5 (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This updates the audio driver to the speaker allocation block from the EDID. A similar change was just implemented for DCE6/8. v2: remove unused variables Signed-off-by: Alex Deucher Acked-by: Rafał Miłecki --- drivers/gpu/drm/radeon/evergreen_hdmi.c | 41 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/radeon/evergreend.h | 7 ++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 2cb0f90126cb..f71ce390aebe 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -58,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz); } +static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + u32 tmp; + u8 *sadb; + int sad_count; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + radeon_connector = to_radeon_connector(connector); + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + return; + } + + /* program the speaker allocation */ + tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= HDMI_CONNECTION; + if (sad_count) + tmp |= SPEAKER_ALLOCATION(sadb[0]); + else + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ + WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; @@ -271,7 +310,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode if (ASIC_IS_DCE6(rdev)) { dce6_afmt_write_speaker_allocation(encoder); } else { - /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */ + dce4_afmt_write_speaker_allocation(encoder); } WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 0d582ac1dc31..430997a70acc 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -714,6 +714,13 @@ #define AFMT_GENERIC0_7 0x7138 /* DCE4/5 ELD audio interface */ +#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78 +#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0) +#define SPEAKER_ALLOCATION_MASK (0x7f << 0) +#define SPEAKER_ALLOCATION_SHIFT 0 +#define HDMI_CONNECTION (1 << 16) +#define DP_CONNECTION (1 << 17) + #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */ -- cgit v1.2.3 From 0ffae60c8976fb407de04cebd8c4cfae932bc671 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Aug 2013 12:03:37 -0400 Subject: drm/radeon: set speaker allocation for DCE3.2 This updates the audio driver to the speaker allocation block from the EDID. A similar change was just implemented for DCE4-8. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_hdmi.c | 42 ++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/r600d.h | 7 +++++++ 2 files changed, 49 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e1dec1339461..6d7128d02493 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -283,6 +283,45 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) } } +static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + u32 tmp; + u8 *sadb; + int sad_count; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + radeon_connector = to_radeon_connector(connector); + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + return; + } + + /* program the speaker allocation */ + tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= HDMI_CONNECTION; + if (sad_count) + tmp |= SPEAKER_ALLOCATION(sadb[0]); + else + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ + WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + /* * update the info frames with the data from the current display mode */ @@ -327,6 +366,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ } + if (ASIC_IS_DCE32(rdev)) + dce3_2_afmt_write_speaker_allocation(encoder); + WREG32(HDMI0_ACR_PACKET_CONTROL + offset, HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */ HDMI0_ACR_SOURCE); /* select SW CTS value */ diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 7c780839a7f4..44ec7a148c3d 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -960,6 +960,13 @@ # define DIG_MODE_SDVO 4 #define DIG1_CNTL 0x79a0 +#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc +#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0) 
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0) +#define SPEAKER_ALLOCATION_SHIFT 0 +#define HDMI_CONNECTION (1 << 16) +#define DP_CONNECTION (1 << 17) + /* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly * different due to the new DIG blocks, but also have 2 instances. -- cgit v1.2.3 From 64d8a728c7deb40e8db3c09b614ffe90415c7664 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 8 Aug 2013 16:31:25 -0400 Subject: drm/radeon: add cg and pg flags This commits adds flags for supported clockgating and powergating features. This allows us to more easily track which features are supported on a particular asic and to enable/disable features for debugging. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 82fef854b686..289047e12ef2 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -160,6 +160,38 @@ extern int radeon_aspm; #define RADEON_CG_BLOCK_VCE (1 << 4) #define RADEON_CG_BLOCK_HDP (1 << 5) +/* CG flags */ +#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0) +#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1) +#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2) +#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3) +#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4) +#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5) +#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6) +#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7) +#define RADEON_CG_SUPPORT_MC_LS (1 << 8) +#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9) +#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10) +#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11) +#define RADEON_CG_SUPPORT_BIF_LS (1 << 12) +#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13) +#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14) +#define RADEON_CG_SUPPORT_HDP_LS (1 << 15) +#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) + +/* PG flags */ +#define RADEON_PG_SUPPORT_GFX_CG (1 << 0) +#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) +#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) +#define RADEON_PG_SUPPORT_UVD (1 << 3) +#define RADEON_PG_SUPPORT_VCE (1 << 4) +#define RADEON_PG_SUPPORT_CP (1 << 5) +#define RADEON_PG_SUPPORT_GDS (1 << 6) +#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7) +#define RADEON_PG_SUPPORT_SDMA (1 << 8) +#define RADEON_PG_SUPPORT_ACP (1 << 9) +#define RADEON_PG_SUPPORT_SAMU (1 << 10) + /* max cursor sizes (in pixels) */ #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 @@ -2156,6 +2188,9 @@ struct radeon_device { struct radeon_atcs atcs; /* srbm instance registers */ struct mutex srbm_mutex; + /* clock, powergating flags */ + u32 cg_flags; + u32 pg_flags; }; int radeon_device_init(struct radeon_device *rdev, -- cgit v1.2.3 From 0116e1efafe09a2d99042943a850deaa1d9b069c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 8 Aug 2013 18:00:10 -0400 Subject: drm/radeon: use new cg/pg flags for SI Allows us finer grained control over clock and powergating on SI. 
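The pattern is worth spelling out before the diff below: radeon_asic_init() fills in rdev->cg_flags and rdev->pg_flags per ASIC, and every enable path then tests the relevant bit before touching the hardware, so a single feature can be switched off for debugging by clearing one flag. A minimal sketch of that gating (illustrative only, mirroring the checks added in si.c further down):

	/* enable a feature only if this ASIC advertises it */
	if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)
		si_enable_mgcg(rdev, true);
	if (!(rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
		si_enable_mc_ls(rdev, false);	/* MC light sleep not supported here */

	/* debugging: drop one feature without disturbing the rest */
	rdev->cg_flags &= ~RADEON_CG_SUPPORT_SDMA_MGCG;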
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 98 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/si.c | 59 +++++++++++----------- 2 files changed, 128 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 69f3122779c3..dcdf5e07490d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2335,6 +2335,104 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->has_uvd = false; else rdev->has_uvd = true; + switch (rdev->family) { + case CHIP_TAHITI: + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + RADEON_CG_SUPPORT_GFX_CGCG | + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + break; + case CHIP_PITCAIRN: + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + RADEON_CG_SUPPORT_GFX_CGCG | + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_GFX_RLC_LS | + RADEON_CG_SUPPORT_MC_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + break; + case CHIP_VERDE: + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + RADEON_CG_SUPPORT_GFX_CGCG | + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_GFX_RLC_LS | + RADEON_CG_SUPPORT_MC_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + /*RADEON_PG_SUPPORT_GFX_CG | + RADEON_PG_SUPPORT_SDMA;*/ + break; + case CHIP_OLAND: + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + RADEON_CG_SUPPORT_GFX_CGCG | + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_GFX_RLC_LS | + RADEON_CG_SUPPORT_MC_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + break; + case CHIP_HAINAN: + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + RADEON_CG_SUPPORT_GFX_CGCG | + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_GFX_RLC_LS | + RADEON_CG_SUPPORT_MC_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + break; + default: + rdev->cg_flags = 0; + rdev->pg_flags = 0; + break; + } break; case CHIP_BONAIRE: rdev->asic = &ci_asic; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index fb2058c9670d..e116128f3d8f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5121,39 +5121,44 @@ static void si_enable_mc_ls(struct radeon_device *rdev, static void si_init_cg(struct 
radeon_device *rdev) { - si_enable_mgcg(rdev, true); - si_enable_cgcg(rdev, false); - /* disable MC LS on Tahiti */ - if (rdev->family == CHIP_TAHITI) + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG) + si_enable_mgcg(rdev, true); + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG) + si_enable_cgcg(rdev, false/*true*/); + /* Disable MC LS on tahiti */ + if (!(rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) si_enable_mc_ls(rdev, false); if (rdev->has_uvd) { - si_enable_uvd_mgcg(rdev, true); + if (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG) + si_enable_uvd_mgcg(rdev, true); si_init_uvd_internal_cg(rdev); } } static void si_fini_cg(struct radeon_device *rdev) { - if (rdev->has_uvd) - si_enable_uvd_mgcg(rdev, false); - si_enable_cgcg(rdev, false); - si_enable_mgcg(rdev, false); + if (rdev->has_uvd) { + if (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG) + si_enable_uvd_mgcg(rdev, false); + } + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG) + si_enable_cgcg(rdev, false); + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG) + si_enable_mgcg(rdev, false); } static void si_init_pg(struct radeon_device *rdev) { - bool has_pg = false; -#if 0 - /* only cape verde supports PG */ - if (rdev->family == CHIP_VERDE) - has_pg = true; -#endif - if (has_pg) { + if (rdev->pg_flags) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { + si_init_dma_pg(rdev); + si_enable_dma_pg(rdev, true); + } si_init_ao_cu_mask(rdev); - si_init_dma_pg(rdev); - si_enable_dma_pg(rdev, true); - si_init_gfx_cgpg(rdev); - si_enable_gfx_cgpg(rdev, true); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { + si_init_gfx_cgpg(rdev); + si_enable_gfx_cgpg(rdev, true); + } } else { WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); @@ -5162,15 +5167,11 @@ static void si_init_pg(struct radeon_device *rdev) static void si_fini_pg(struct radeon_device *rdev) { - bool has_pg = false; - - /* only cape verde supports PG */ - if (rdev->family == CHIP_VERDE) - has_pg = true; - - if (has_pg) { - si_enable_dma_pg(rdev, false); - si_enable_gfx_cgpg(rdev, false); + if (rdev->pg_flags) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) + si_enable_dma_pg(rdev, false); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) + si_enable_gfx_cgpg(rdev, false); } } -- cgit v1.2.3 From e16866ecfbfabc546fe8f02fdf4359707f81e81e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 8 Aug 2013 19:34:07 -0400 Subject: drm/radeon/si: restructure cg code (v3) Restructure clockgating code so that it can be enabled/disabled from other components such as dpm.
v2: make function static v3: add fine grained cg controls Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_asic.c | 10 +- drivers/gpu/drm/radeon/si.c | 207 ++++++++++++++++++++++++++++++----- drivers/gpu/drm/radeon/sid.h | 8 ++ 4 files changed, 192 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 289047e12ef2..8cd87bac0486 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -159,6 +159,7 @@ extern int radeon_aspm; #define RADEON_CG_BLOCK_UVD (1 << 3) #define RADEON_CG_BLOCK_VCE (1 << 4) #define RADEON_CG_BLOCK_HDP (1 << 5) +#define RADEON_CG_BLOCK_BIF (1 << 6) /* CG flags */ #define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index dcdf5e07490d..3bd96cdb7601 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2340,7 +2340,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | @@ -2357,7 +2357,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | @@ -2376,7 +2376,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | @@ -2397,7 +2397,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | @@ -2415,7 +2415,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e116128f3d8f..b1d22c704c53 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4844,7 +4844,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable) u32 data, orig; orig = data = RREG32(DMA_PG); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA)) data |= PG_CNTL_ENABLE; else data &= ~PG_CNTL_ENABLE; @@ -4868,7 +4868,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev, { u32 tmp; - if (enable) { + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); WREG32(RLC_TTOP_D, tmp); @@ -4973,7 +4973,7 @@ static void si_enable_cgcg(struct radeon_device *rdev, si_enable_gui_idle_interrupt(rdev, enable); - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { WREG32(RLC_GCPM_GENERAL_3, 0x00000080); tmp = si_halt_rlc(rdev); @@ -5007,16 +5007,18 @@ static void si_enable_mgcg(struct radeon_device *rdev, { u32 
data, orig, tmp = 0; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { orig = data = RREG32(CGTS_SM_CTRL_REG); data = 0x96940200; if (orig != data) WREG32(CGTS_SM_CTRL_REG, data); - orig = data = RREG32(CP_MEM_SLP_CNTL); - data |= CP_MEM_LS_EN; - if (orig != data) - WREG32(CP_MEM_SLP_CNTL, data); + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + } orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); data &= 0xffffffc0; @@ -5061,7 +5063,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev, { u32 orig, data, tmp; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); tmp |= 0x3fff; WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); @@ -5109,7 +5111,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) data |= MC_LS_ENABLE; else data &= ~MC_LS_ENABLE; @@ -5118,19 +5120,158 @@ static void si_enable_mc_ls(struct radeon_device *rdev, } } +static void si_enable_mc_mgcg(struct radeon_device *rdev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)) + data |= MC_CG_ENABLE; + else + data &= ~MC_CG_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void si_enable_dma_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data, offset; + int i; + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < 2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data &= ~MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + WREG32(DMA_CLK_CTRL + offset, 0x00000100); + } + } else { + for (i = 0; i < 2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data |= MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + + orig = data = RREG32(DMA_CLK_CTRL + offset); + data = 0xff000000; + if (data != orig) + WREG32(DMA_CLK_CTRL + offset, data); + } + } +} + +static void si_enable_bif_mgls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32_PCIE(PCIE_CNTL2); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN; + else + data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN); + + if (orig != data) + WREG32_PCIE(PCIE_CNTL2, data); +} + +static void si_enable_hdp_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_HOST_PATH_CNTL); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) + data &= ~CLOCK_GATING_DIS; + else + data |= CLOCK_GATING_DIS; + + if (orig != data) + WREG32(HDP_HOST_PATH_CNTL, data); +} + +static void si_enable_hdp_ls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_MEM_POWER_LS); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) + data |= HDP_LS_ENABLE; + else + data &= ~HDP_LS_ENABLE; 
+ + if (orig != data) + WREG32(HDP_MEM_POWER_LS, data); +} + +void si_update_cg(struct radeon_device *rdev, + u32 block, bool enable) +{ + if (block & RADEON_CG_BLOCK_GFX) { + /* order matters! */ + if (enable) { + si_enable_mgcg(rdev, true); + si_enable_cgcg(rdev, true); + } else { + si_enable_cgcg(rdev, false); + si_enable_mgcg(rdev, false); + } + } + + if (block & RADEON_CG_BLOCK_MC) { + si_enable_mc_mgcg(rdev, enable); + si_enable_mc_ls(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_SDMA) { + si_enable_dma_mgcg(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_BIF) { + si_enable_bif_mgls(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_UVD) { + if (rdev->has_uvd) { + si_enable_uvd_mgcg(rdev, enable); + } + } + + if (block & RADEON_CG_BLOCK_HDP) { + si_enable_hdp_mgcg(rdev, enable); + si_enable_hdp_ls(rdev, enable); + } +} static void si_init_cg(struct radeon_device *rdev) { - if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG) - si_enable_mgcg(rdev, true); - if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG) - si_enable_cgcg(rdev, false/*true*/); - /* Disable MC LS on tahiti */ - if (!(rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) - si_enable_mc_ls(rdev, false); + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), true); if (rdev->has_uvd) { - if (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG) - si_enable_uvd_mgcg(rdev, true); + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); si_init_uvd_internal_cg(rdev); } } @@ -5138,13 +5279,20 @@ static void si_init_cg(struct radeon_device *rdev) static void si_fini_cg(struct radeon_device *rdev) { if (rdev->has_uvd) { - if (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG) - si_enable_uvd_mgcg(rdev, false); + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); } - if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG) - si_enable_cgcg(rdev, false); - if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG) - si_enable_mgcg(rdev, false); + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); +} + +void si_update_pg(struct radeon_device *rdev, + bool enable) +{ + si_enable_dma_pg(rdev, enable); + si_enable_gfx_cgpg(rdev, enable); } static void si_init_pg(struct radeon_device *rdev) @@ -5152,13 +5300,12 @@ static void si_init_pg(struct radeon_device *rdev) if (rdev->pg_flags) { if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { si_init_dma_pg(rdev); - si_enable_dma_pg(rdev, true); } si_init_ao_cu_mask(rdev); if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { si_init_gfx_cgpg(rdev); - si_enable_gfx_cgpg(rdev, true); } + si_update_pg(rdev, false); } else { WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); @@ -6308,6 +6455,8 @@ int si_suspend(struct radeon_device *rdev) uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); } + si_fini_pg(rdev); + si_fini_cg(rdev); si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -6439,10 +6588,10 @@ void si_fini(struct radeon_device *rdev) { si_cp_fini(rdev); cayman_dma_fini(rdev); + si_fini_pg(rdev); + si_fini_cg(rdev); si_irq_fini(rdev); sumo_rlc_fini(rdev); - si_fini_cg(rdev); - si_fini_pg(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 968cf699c29e..91dae16fddc4 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h 
@@ -581,6 +581,7 @@ #define CLKS_MASK (0xfff << 0) #define HDP_HOST_PATH_CNTL 0x2C00 +#define CLOCK_GATING_DIS (1 << 23) #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C @@ -588,6 +589,8 @@ #define HDP_ADDR_CONFIG 0x2F48 #define HDP_MISC_CNTL 0x2F4C #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) +#define HDP_MEM_POWER_LS 0x2F50 +#define HDP_LS_ENABLE (1 << 0) #define ATC_MISC_CG 0x3350 @@ -1354,6 +1357,7 @@ /* PCIE registers idx/data 0x30/0x34 */ #define PCIE_CNTL2 0x1c /* PCIE */ # define SLV_MEM_LS_EN (1 << 16) +# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17) # define MST_MEM_LS_EN (1 << 18) # define REPLAY_MEM_LS_EN (1 << 19) #define PCIE_LC_STATUS1 0x28 /* PCIE */ @@ -1703,6 +1707,10 @@ # define DMA_IDLE (1 << 0) #define DMA_TILING_CONFIG 0xd0b8 +#define DMA_POWER_CNTL 0xd0bc +# define MEM_POWER_OVERRIDE (1 << 8) +#define DMA_CLK_CTRL 0xd0c0 + #define DMA_PG 0xd0d4 # define PG_CNTL_ENABLE (1 << 0) #define DMA_PGFSM_CONFIG 0xd0d8 -- cgit v1.2.3 From 5594a558faca933f64277c2033bd724968cd3d89 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Aug 2013 16:20:26 -0400 Subject: drm/radeon: fixes for gfx clockgating on SI Clockgating requires signalling between the CP and the RLC to work properly. Resetting the CP block in the CP resume code messed up the internal coordination between the blocks. Removing the reset allows gfx clockgating to work properly. However, when gfx clock gating is enabled, there is a strange interaction with dpm which causes the chip to stay in the high performance level all the time, so leave gfx clockgating disabled for now. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 10 +++++----- drivers/gpu/drm/radeon/si.c | 17 ++++------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3bd96cdb7601..52fe0d4eeaa0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2338,7 +2338,7 @@ int radeon_asic_init(struct radeon_device *rdev) switch (rdev->family) { case CHIP_TAHITI: rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2355,7 +2355,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_PITCAIRN: rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2374,7 +2374,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_VERDE: rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2395,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_OLAND: rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2413,7 +2413,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_HAINAN: rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index b1d22c704c53..ff48c88c9133 100644 --- a/drivers/gpu/drm/radeon/si.c +++ 
b/drivers/gpu/drm/radeon/si.c @@ -3371,17 +3371,6 @@ static int si_cp_resume(struct radeon_device *rdev) u32 rb_bufsz; int r; - /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ - WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | - SOFT_RESET_PA | - SOFT_RESET_VGT | - SOFT_RESET_SPI | - SOFT_RESET_SX)); - RREG32(GRBM_SOFT_RESET); - mdelay(15); - WREG32(GRBM_SOFT_RESET, 0); - RREG32(GRBM_SOFT_RESET); - WREG32(CP_SEM_WAIT_TIMER, 0x0); WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); @@ -4971,9 +4960,9 @@ static void si_enable_cgcg(struct radeon_device *rdev, orig = data = RREG32(RLC_CGCG_CGLS_CTRL); - si_enable_gui_idle_interrupt(rdev, enable); - if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { + si_enable_gui_idle_interrupt(rdev, true); + WREG32(RLC_GCPM_GENERAL_3, 0x00000080); tmp = si_halt_rlc(rdev); @@ -4990,6 +4979,8 @@ static void si_enable_cgcg(struct radeon_device *rdev, data |= CGCG_EN | CGLS_EN; } else { + si_enable_gui_idle_interrupt(rdev, false); + RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); -- cgit v1.2.3 From 4cb0add259179ca8634fc0fddb2274534a58ff2d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 17:24:08 -0400 Subject: drm/radeon: handle cg in SI dpm code Clockgating needs to be disabled around certain parts of dpm setup otherwise the smc gets into a bad state and dpm doesn't work properly. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/si_dpm.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 96d96f5df9e7..75a435f14380 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1753,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev, u32 engine_clock, SISLANDS_SMC_SCLK_VALUE *sclk); +extern void si_update_cg(struct radeon_device *rdev, + u32 block, bool enable); + static struct si_power_info *si_get_pi(struct radeon_device *rdev) { struct si_power_info *pi = rdev->pm.dpm.priv; @@ -5759,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev) struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; int ret; + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + if (si_is_smc_running(rdev)) return -EINVAL; if (pi->voltage_control) @@ -5878,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev) si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), true); + ni_update_current_ps(rdev, boot_ps); return 0; @@ -5888,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev) struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + if (!si_is_smc_running(rdev)) return; si_disable_ulv(rdev); @@ -5952,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev) struct radeon_ps *old_ps = &eg_pi->current_rps; int ret; + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + ret = 
si_disable_ulv(rdev); if (ret) { DRM_ERROR("si_disable_ulv failed\n"); @@ -6050,6 +6081,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev) return ret; } + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), true); + return 0; } -- cgit v1.2.3 From 090f4b6ad38eec0c24dcdc9d01cc22077c3b9d22 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 18:53:56 -0400 Subject: drm/radeon: enable mgcg on SI Now that the CP is no longer reset and cg is properly disabled when appropriate in the dpm code, we can enable mgcg (medium grained clockgating). Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 52fe0d4eeaa0..3bd96cdb7601 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2338,7 +2338,7 @@ int radeon_asic_init(struct radeon_device *rdev) switch (rdev->family) { case CHIP_TAHITI: rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2355,7 +2355,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_PITCAIRN: rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2374,7 +2374,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_VERDE: rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2395,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_OLAND: rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2413,7 +2413,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_HAINAN: rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | -- cgit v1.2.3 From 59a82d0e65539812652dffea160ef483c1d310f5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 13 Aug 2013 12:48:06 -0400 Subject: drm/radeon/si: properly set up the clearstate buffer for pg (v2) The format of the clearstate buffer used for pg (powergating) changed between NI and SI. This formats it properly for what the hardware expects on SI.
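Concretely, on SI the object becomes a small three-dword descriptor followed, 256 bytes in, by a PACKET3-formatted clear-state stream. A sketch of that layout, paraphrasing the sumo_rlc_init() and si_get_csb_buffer() changes in the diff below:

	/* sketch only -- the real code is in sumo_rlc_init() */
	volatile u32 *dst_ptr = rdev->rlc.cs_ptr;
	u64 csb_addr = rdev->rlc.clear_state_gpu_addr + 256;	/* payload sits after the header */

	dst_ptr[0] = upper_32_bits(csb_addr);		/* descriptor: address, high dword */
	dst_ptr[1] = lower_32_bits(csb_addr);		/* descriptor: address, low dword */
	dst_ptr[2] = rdev->rlc.clear_state_size;	/* descriptor: payload size in dwords */
	si_get_csb_buffer(rdev, &dst_ptr[256 / 4]);	/* PACKET3 clear-state stream */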
v2: fix addresses Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 93 ++++++++++++++++++++++---------------- drivers/gpu/drm/radeon/si.c | 91 +++++++++++++++++++++++++++++++++++++ 2 files changed, 145 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bbaa4f2056ce..536908109001 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -142,6 +142,9 @@ extern void cayman_vm_decode_fault(struct radeon_device *rdev, u32 status, u32 addr); void cik_init_cp_pg_table(struct radeon_device *rdev); +extern u32 si_get_csb_size(struct radeon_device *rdev); +extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); + static const u32 evergreen_golden_registers[] = { 0x3f90, 0xffff0000, 0xff000000, @@ -3893,7 +3896,7 @@ int sumo_rlc_init(struct radeon_device *rdev) const u32 *src_ptr; volatile u32 *dst_ptr; u32 dws, data, i, j, k, reg_num; - u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; + u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0; u64 reg_list_mc_addr; const struct cs_section_def *cs_data; int r; @@ -3937,7 +3940,7 @@ int sumo_rlc_init(struct radeon_device *rdev) dst_ptr = rdev->rlc.sr_ptr; if (rdev->family >= CHIP_TAHITI) { /* SI */ - for (i = 0; i < dws; i++) + for (i = 0; i < rdev->rlc.reg_list_size; i++) dst_ptr[i] = src_ptr[i]; } else { /* ON/LN/TN */ @@ -3963,20 +3966,25 @@ int sumo_rlc_init(struct radeon_device *rdev) if (cs_data) { /* clear state block */ - reg_list_num = 0; - dws = 0; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_list_num++; - dws += cs_data[i].section[j].reg_count; + if (rdev->family >= CHIP_TAHITI) { + rdev->rlc.clear_state_size = si_get_csb_size(rdev); + dws = rdev->rlc.clear_state_size + (256 / 4); + } else { + reg_list_num = 0; + dws = 0; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_list_num++; + dws += cs_data[i].section[j].reg_count; + } } + reg_list_blk_index = (3 * reg_list_num + 2); + dws += reg_list_blk_index; + rdev->rlc.clear_state_size = dws; } - reg_list_blk_index = (3 * reg_list_num + 2); - dws += reg_list_blk_index; - rdev->rlc.clear_state_size = dws; if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, rdev->rlc.clear_state_size * 4, PAGE_SIZE, true, + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); @@ -4006,36 +4014,43 @@ int sumo_rlc_init(struct radeon_device *rdev) } /* set up the cs buffer */ dst_ptr = rdev->rlc.cs_ptr; - reg_list_hdr_blk_index = 0; - reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); - data = upper_32_bits(reg_list_mc_addr); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_num = cs_data[i].section[j].reg_count; - data = reg_list_mc_addr & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = 0x08000000 | (reg_num * 4); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - for (k = 0; k < reg_num; k++) { - data = 
cs_data[i].section[j].extent[k]; - dst_ptr[reg_list_blk_index + k] = data; + if (rdev->family >= CHIP_TAHITI) { + reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256; + dst_ptr[0] = upper_32_bits(reg_list_mc_addr); + dst_ptr[1] = lower_32_bits(reg_list_mc_addr); + dst_ptr[2] = rdev->rlc.clear_state_size; + si_get_csb_buffer(rdev, &dst_ptr[(256/4)]); + } else { + reg_list_hdr_blk_index = 0; + reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); + data = upper_32_bits(reg_list_mc_addr); + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_num = cs_data[i].section[j].reg_count; + data = reg_list_mc_addr & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + data = 0x08000000 | (reg_num * 4); + dst_ptr[reg_list_hdr_blk_index] = data; + reg_list_hdr_blk_index++; + + for (k = 0; k < reg_num; k++) { + data = cs_data[i].section[j].extent[k]; + dst_ptr[reg_list_blk_index + k] = data; + } + reg_list_mc_addr += reg_num * 4; + reg_list_blk_index += reg_num; } - reg_list_mc_addr += reg_num * 4; - reg_list_blk_index += reg_num; } + dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; } - dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; - radeon_bo_kunmap(rdev->rlc.clear_state_obj); radeon_bo_unreserve(rdev->rlc.clear_state_obj); } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ff48c88c9133..ebe04e45e5c8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5286,6 +5286,97 @@ void si_update_pg(struct radeon_device *rdev, si_enable_gfx_cgpg(rdev, enable); } +u32 si_get_csb_size(struct radeon_device *rdev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config */ + count += 3; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); + buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; + + buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); + buffer[count++] = 0x80000000; + buffer[count++] = 0x80000000; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); + buffer[count++] = ext->reg_index - 0xa000; + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = ext->extent[i]; + } else { + return; + } + } + } + + buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1); + buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; + switch (rdev->family) { + case 
CHIP_TAHITI: + case CHIP_PITCAIRN: + buffer[count++] = 0x2a00126a; + break; + case CHIP_VERDE: + buffer[count++] = 0x0000124a; + break; + case CHIP_OLAND: + buffer[count++] = 0x00000082; + break; + case CHIP_HAINAN: + buffer[count++] = 0x00000000; + break; + default: + buffer[count++] = 0x00000000; + break; + } + + buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); + buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; + + buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); + buffer[count++] = 0; +} + static void si_init_pg(struct radeon_device *rdev) { if (rdev->pg_flags) { -- cgit v1.2.3 From ca6ebb39df2be99f08fcf30335b4d52319ab9208 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 13 Aug 2013 13:18:37 -0400 Subject: drm/radeon/si: enable DMA pg by default Enable DMA powergating by default. The DMA engines will be powergated when not in use. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 6 +++--- drivers/gpu/drm/radeon/si.c | 16 ++++------------ 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3bd96cdb7601..78e9bbc8446b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2389,9 +2389,9 @@ int radeon_asic_init(struct radeon_device *rdev) RADEON_CG_SUPPORT_UVD_MGCG | RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_MGCG; - rdev->pg_flags = 0; - /*RADEON_PG_SUPPORT_GFX_CG | - RADEON_PG_SUPPORT_SDMA;*/ + rdev->pg_flags = 0 | + /*RADEON_PG_SUPPORT_GFX_CG | */ + RADEON_PG_SUPPORT_SDMA; break; case CHIP_OLAND: rdev->cg_flags = diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ebe04e45e5c8..d5f77eb23441 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5279,13 +5279,6 @@ static void si_fini_cg(struct radeon_device *rdev) RADEON_CG_BLOCK_HDP), false); } -void si_update_pg(struct radeon_device *rdev, - bool enable) -{ - si_enable_dma_pg(rdev, enable); - si_enable_gfx_cgpg(rdev, enable); -} - u32 si_get_csb_size(struct radeon_device *rdev) { u32 count = 0; @@ -5387,7 +5380,8 @@ static void si_init_pg(struct radeon_device *rdev) if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { si_init_gfx_cgpg(rdev); } - si_update_pg(rdev, false); + si_enable_dma_pg(rdev, true); + si_enable_gfx_cgpg(rdev, true); } else { WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); @@ -5397,10 +5391,8 @@ static void si_init_pg(struct radeon_device *rdev) static void si_fini_pg(struct radeon_device *rdev) { if (rdev->pg_flags) { - if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) - si_enable_dma_pg(rdev, false); - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) - si_enable_gfx_cgpg(rdev, false); + si_enable_dma_pg(rdev, false); + si_enable_gfx_cgpg(rdev, false); } } -- cgit v1.2.3 From 473359bc28e193031a76d99f71e8b6c4808719a6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 9 Aug 2013 11:18:39 -0400 Subject: drm/radeon: restructure cg/pg on cik (v2) - use new cg/pg flags for finer grained clock and powergating control - restructure the cg/pg code so it can be called from other components such as dpm v2: fix build breakage from rebase Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 165 +++++++++++++++++++++-------------- drivers/gpu/drm/radeon/cikd.h | 1 + drivers/gpu/drm/radeon/radeon_asic.c | 73 +++++++++++++++- 3 files changed, 170 insertions(+), 69 deletions(-) diff --git 
a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 2b6049d55233..b7859fe3df80 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5062,7 +5062,7 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable) cik_enable_gui_idle_interrupt(rdev, enable); - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { tmp = cik_halt_rlc(rdev); cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); @@ -5092,11 +5092,15 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) { u32 data, orig, tmp = 0; - if (enable) { - orig = data = RREG32(CP_MEM_SLP_CNTL); - data |= CP_MEM_LS_EN; - if (orig != data) - WREG32(CP_MEM_SLP_CNTL, data); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) { + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + } + } orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); data &= 0xfffffffd; @@ -5113,17 +5117,21 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) cik_update_rlc(rdev, tmp); - orig = data = RREG32(CGTS_SM_CTRL_REG); - data &= ~SM_MODE_MASK; - data |= SM_MODE(0x2); - data |= SM_MODE_ENABLE; - data &= ~CGTS_OVERRIDE; - data &= ~CGTS_LS_OVERRIDE; - data &= ~ON_MONITOR_ADD_MASK; - data |= ON_MONITOR_ADD_EN; - data |= ON_MONITOR_ADD(0x96); - if (orig != data) - WREG32(CGTS_SM_CTRL_REG, data); + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) { + orig = data = RREG32(CGTS_SM_CTRL_REG); + data &= ~SM_MODE_MASK; + data |= SM_MODE(0x2); + data |= SM_MODE_ENABLE; + data &= ~CGTS_OVERRIDE; + if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) && + (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS)) + data &= ~CGTS_LS_OVERRIDE; + data &= ~ON_MONITOR_ADD_MASK; + data |= ON_MONITOR_ADD_EN; + data |= ON_MONITOR_ADD(0x96); + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + } } else { orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); data |= 0x00000002; @@ -5180,7 +5188,7 @@ static void cik_enable_mc_ls(struct radeon_device *rdev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) data |= MC_LS_ENABLE; else data &= ~MC_LS_ENABLE; @@ -5197,7 +5205,7 @@ static void cik_enable_mc_mgcg(struct radeon_device *rdev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)) data |= MC_CG_ENABLE; else data &= ~MC_CG_ENABLE; @@ -5211,7 +5219,7 @@ static void cik_enable_sdma_mgcg(struct radeon_device *rdev, { u32 orig, data; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); } else { @@ -5232,7 +5240,7 @@ static void cik_enable_sdma_mgls(struct radeon_device *rdev, { u32 orig, data; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) { orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); data |= 0x100; if (orig != data) @@ -5260,7 +5268,7 @@ static void cik_enable_uvd_mgcg(struct radeon_device *rdev, { u32 orig, data; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); data = 0xfff; WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data); @@ -5281,6 
+5289,24 @@ static void cik_enable_uvd_mgcg(struct radeon_device *rdev, } } +static void cik_enable_bif_mgls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32_PCIE_PORT(PCIE_CNTL2); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN; + else + data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN); + + if (orig != data) + WREG32_PCIE_PORT(PCIE_CNTL2, data); +} + static void cik_enable_hdp_mgcg(struct radeon_device *rdev, bool enable) { @@ -5288,7 +5314,7 @@ static void cik_enable_hdp_mgcg(struct radeon_device *rdev, orig = data = RREG32(HDP_HOST_PATH_CNTL); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) data &= ~CLOCK_GATING_DIS; else data |= CLOCK_GATING_DIS; @@ -5304,7 +5330,7 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev, orig = data = RREG32(HDP_MEM_POWER_LS); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) data |= HDP_LS_ENABLE; else data &= ~HDP_LS_ENABLE; @@ -5339,6 +5365,10 @@ void cik_update_cg(struct radeon_device *rdev, cik_enable_sdma_mgls(rdev, enable); } + if (block & RADEON_CG_BLOCK_BIF) { + cik_enable_bif_mgls(rdev, enable); + } + if (block & RADEON_CG_BLOCK_UVD) { if (rdev->has_uvd) cik_enable_uvd_mgcg(rdev, enable); @@ -5360,17 +5390,29 @@ static void cik_init_cg(struct radeon_device *rdev) cik_update_cg(rdev, (RADEON_CG_BLOCK_MC | RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | RADEON_CG_BLOCK_UVD | RADEON_CG_BLOCK_HDP), true); } +static void cik_fini_cg(struct radeon_device *rdev) +{ + cik_update_cg(rdev, (RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + + cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false); +} + static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev, bool enable) { u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS)) data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE; else data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE; @@ -5384,7 +5426,7 @@ static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev, u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS)) data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE; else data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE; @@ -5397,7 +5439,7 @@ static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable) u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP)) data &= ~DISABLE_CP_PG; else data |= DISABLE_CP_PG; @@ -5410,7 +5452,7 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable) u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS)) data &= ~DISABLE_GDS_PG; else data |= DISABLE_GDS_PG; @@ -5465,7 +5507,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev, { u32 data, orig; - if (enable) { + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { orig = data = RREG32(RLC_PG_CNTL); data |= GFX_PG_ENABLE; if (orig != data) @@ -5552,7 +5594,7 @@ static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev, u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG)) data |= STATIC_PER_CU_PG_ENABLE; else data &= 
~STATIC_PER_CU_PG_ENABLE; @@ -5566,7 +5608,7 @@ static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev, u32 data, orig; orig = data = RREG32(RLC_PG_CNTL); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG)) data |= DYN_PER_CU_PG_ENABLE; else data &= ~DYN_PER_CU_PG_ENABLE; @@ -5628,52 +5670,37 @@ static void cik_init_gfx_cgpg(struct radeon_device *rdev) static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable) { - bool has_pg = false; - bool has_dyn_mgpg = false; - bool has_static_mgpg = false; - - /* only APUs have PG */ - if (rdev->flags & RADEON_IS_IGP) { - has_pg = true; - has_static_mgpg = true; - if (rdev->family == CHIP_KAVERI) - has_dyn_mgpg = true; - } - - if (has_pg) { - cik_enable_gfx_cgpg(rdev, enable); - if (enable) { - cik_enable_gfx_static_mgpg(rdev, has_static_mgpg); - cik_enable_gfx_dynamic_mgpg(rdev, has_dyn_mgpg); - } else { - cik_enable_gfx_static_mgpg(rdev, false); - cik_enable_gfx_dynamic_mgpg(rdev, false); - } - } - + cik_enable_gfx_cgpg(rdev, enable); + cik_enable_gfx_static_mgpg(rdev, enable); + cik_enable_gfx_dynamic_mgpg(rdev, enable); } -void cik_init_pg(struct radeon_device *rdev) +static void cik_init_pg(struct radeon_device *rdev) { - bool has_pg = false; - - /* only APUs have PG */ - if (rdev->flags & RADEON_IS_IGP) { - /* XXX disable this for now */ - /* has_pg = true; */ - } - - if (has_pg) { + if (rdev->pg_flags) { cik_enable_sck_slowdown_on_pu(rdev, true); cik_enable_sck_slowdown_on_pd(rdev, true); - cik_init_gfx_cgpg(rdev); - cik_enable_cp_pg(rdev, true); - cik_enable_gds_pg(rdev, true); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { + cik_init_gfx_cgpg(rdev); + cik_enable_cp_pg(rdev, true); + cik_enable_gds_pg(rdev, true); + } cik_init_ao_cu_mask(rdev); cik_update_gfx_pg(rdev, true); } } +static void cik_fini_pg(struct radeon_device *rdev) +{ + if (rdev->pg_flags) { + cik_update_gfx_pg(rdev, false); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { + cik_enable_cp_pg(rdev, false); + cik_enable_gds_pg(rdev, false); + } + } +} + /* * Interrupts * Starting with r6xx, interrupts are handled via a ring buffer. 
@@ -7059,6 +7086,8 @@ int cik_suspend(struct radeon_device *rdev) cik_sdma_enable(rdev, false); uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + cik_fini_pg(rdev); + cik_fini_cg(rdev); cik_irq_suspend(rdev); radeon_wb_disable(rdev); cik_pcie_gart_disable(rdev); @@ -7214,6 +7243,8 @@ void cik_fini(struct radeon_device *rdev) { cik_cp_fini(rdev); cik_sdma_fini(rdev); + cik_fini_pg(rdev); + cik_fini_cg(rdev); cik_irq_fini(rdev); sumo_rlc_fini(rdev); cik_mec_fini(rdev); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 259b81c7cdd8..6a92f491cb91 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -285,6 +285,7 @@ #define PCIE_CNTL2 0x1001001c /* PCIE */ # define SLV_MEM_LS_EN (1 << 16) +# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17) # define MST_MEM_LS_EN (1 << 18) # define REPLAY_MEM_LS_EN (1 << 19) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 78e9bbc8446b..630853b96841 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2438,15 +2438,84 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->asic = &ci_asic; rdev->num_crtc = 6; rdev->has_uvd = true; + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CGTS_LS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_MC_LS | + RADEON_CG_SUPPORT_MC_MGCG | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_SDMA_LS | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; break; case CHIP_KAVERI: case CHIP_KABINI: rdev->asic = &kv_asic; /* set num crtcs */ - if (rdev->family == CHIP_KAVERI) + if (rdev->family == CHIP_KAVERI) { rdev->num_crtc = 4; - else + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CGTS_LS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_SDMA_LS | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + /*RADEON_PG_SUPPORT_GFX_CG | + RADEON_PG_SUPPORT_GFX_SMG | + RADEON_PG_SUPPORT_GFX_DMG | + RADEON_PG_SUPPORT_UVD | + RADEON_PG_SUPPORT_VCE | + RADEON_PG_SUPPORT_CP | + RADEON_PG_SUPPORT_GDS | + RADEON_PG_SUPPORT_RLC_SMU_HS | + RADEON_PG_SUPPORT_ACP | + RADEON_PG_SUPPORT_SAMU;*/ + } else { rdev->num_crtc = 2; + rdev->cg_flags = + RADEON_CG_SUPPORT_GFX_MGCG | + RADEON_CG_SUPPORT_GFX_MGLS | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ + RADEON_CG_SUPPORT_GFX_CGLS | + RADEON_CG_SUPPORT_GFX_CGTS | + RADEON_CG_SUPPORT_GFX_CGTS_LS | + RADEON_CG_SUPPORT_GFX_CP_LS | + RADEON_CG_SUPPORT_SDMA_MGCG | + RADEON_CG_SUPPORT_SDMA_LS | + RADEON_CG_SUPPORT_BIF_LS | + RADEON_CG_SUPPORT_VCE_MGCG | + RADEON_CG_SUPPORT_UVD_MGCG | + RADEON_CG_SUPPORT_HDP_LS | + RADEON_CG_SUPPORT_HDP_MGCG; + rdev->pg_flags = 0; + /*RADEON_PG_SUPPORT_GFX_CG | + RADEON_PG_SUPPORT_GFX_SMG | + RADEON_PG_SUPPORT_UVD | + RADEON_PG_SUPPORT_VCE | + RADEON_PG_SUPPORT_CP | + RADEON_PG_SUPPORT_GDS | + RADEON_PG_SUPPORT_RLC_SMU_HS | + RADEON_PG_SUPPORT_SAMU;*/ + } rdev->has_uvd = true; break; default: -- cgit v1.2.3 From ddc76ff6c78ecb189102bdc3bd9d14de5b750a6f Mon Sep 17 
00:00:00 2001 From: Alex Deucher Date: Mon, 12 Aug 2013 17:25:26 -0400 Subject: drm/radeon: fixes for gfx clockgating on CIK Clockgating requires signalling between the CP and the RLC to work properly. Resetting the CP block in the CP resume code messed up the internal coordination between the blocks. Removing the reset allows gfx clockgating to work properly. However, when gfx clock gating is enabled, there is a strange interaction with dpm which causes the chip to stay in the high performance level all the time, so leave gfx clockgating disabled for now. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 15 +++++---------- drivers/gpu/drm/radeon/radeon_asic.c | 6 +++--- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b7859fe3df80..1f088800295d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3974,13 +3974,6 @@ static int cik_cp_resume(struct radeon_device *rdev) { int r; - /* Reset all cp blocks */ - WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); - RREG32(GRBM_SOFT_RESET); - mdelay(15); - WREG32(GRBM_SOFT_RESET, 0); - RREG32(GRBM_SOFT_RESET); - r = cik_cp_load_microcode(rdev); if (r) return r; @@ -5060,9 +5053,9 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable) orig = data = RREG32(RLC_CGCG_CGLS_CTRL); - cik_enable_gui_idle_interrupt(rdev, enable); - if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { + cik_enable_gui_idle_interrupt(rdev, true); + tmp = cik_halt_rlc(rdev); cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); @@ -5075,6 +5068,8 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable) data |= CGCG_EN | CGLS_EN; } else { + cik_enable_gui_idle_interrupt(rdev, false); + RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); @@ -5383,7 +5378,7 @@ void cik_update_cg(struct radeon_device *rdev, static void cik_init_cg(struct radeon_device *rdev) { - cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false); /* XXX true */ + cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true); if (rdev->has_uvd) si_init_uvd_internal_cg(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 630853b96841..6152169d011f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2439,7 +2439,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->num_crtc = 6; rdev->has_uvd = true; rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2464,7 +2464,7 @@ int radeon_asic_init(struct radeon_device *rdev) if (rdev->family == CHIP_KAVERI) { rdev->num_crtc = 4; rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2492,7 +2492,7 @@ int radeon_asic_init(struct radeon_device *rdev) } else { rdev->num_crtc = 2; rdev->cg_flags = - RADEON_CG_SUPPORT_GFX_MGCG | + /*RADEON_CG_SUPPORT_GFX_MGCG |*/ RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | -- cgit v1.2.3 From a0f38609c9870fe0e3d5c10b1e6926a5750d0a7a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 22 Aug 2013 11:57:46 -0400 Subject: drm/radeon/cik: properly set up the clearstate buffer for pg (v2) The format of the clearstate buffer used for pg (powergating) changed between NI and SI. 
This formats it properly for what the hardware expects on SI+. v2: fix addresses Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 93 +++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/radeon/evergreen.c | 13 +++++- 2 files changed, 103 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 1f088800295d..582f8e4f36d4 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5622,7 +5622,7 @@ static void cik_init_gfx_cgpg(struct radeon_device *rdev) if (rdev->rlc.cs_data) { WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr)); - WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_gpu_addr); + WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr)); WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size); } else { WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); @@ -5670,6 +5670,97 @@ static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable) cik_enable_gfx_dynamic_mgpg(rdev, enable); } +u32 cik_get_csb_size(struct radeon_device *rdev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config/pa_sc_raster_config1 */ + count += 4; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); + buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; + + buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); + buffer[count++] = 0x80000000; + buffer[count++] = 0x80000000; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); + buffer[count++] = ext->reg_index - 0xa000; + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = ext->extent[i]; + } else { + return; + } + } + } + + buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2); + buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; + switch (rdev->family) { + case CHIP_BONAIRE: + buffer[count++] = 0x16000012; + buffer[count++] = 0x00000000; + break; + case CHIP_KAVERI: + buffer[count++] = 0x00000000; /* XXX */ + buffer[count++] = 0x00000000; + break; + case CHIP_KABINI: + buffer[count++] = 0x00000000; /* XXX */ + buffer[count++] = 0x00000000; + break; + default: + buffer[count++] = 0x00000000; + buffer[count++] = 0x00000000; + break; + } + + buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); + buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; + + buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); + buffer[count++] = 0; +} + static void cik_init_pg(struct radeon_device *rdev) { if (rdev->pg_flags) { diff --git a/drivers/gpu/drm/radeon/evergreen.c 
b/drivers/gpu/drm/radeon/evergreen.c index 536908109001..2ca9f13f2c79 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -144,6 +144,8 @@ void cik_init_cp_pg_table(struct radeon_device *rdev); extern u32 si_get_csb_size(struct radeon_device *rdev); extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); +extern u32 cik_get_csb_size(struct radeon_device *rdev); +extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); static const u32 evergreen_golden_registers[] = { @@ -3903,6 +3905,9 @@ int sumo_rlc_init(struct radeon_device *rdev) src_ptr = rdev->rlc.reg_list; dws = rdev->rlc.reg_list_size; + if (rdev->family >= CHIP_BONAIRE) { + dws += (5 * 16) + 48 + 48 + 64; + } cs_data = rdev->rlc.cs_data; if (src_ptr) { @@ -3966,7 +3971,9 @@ int sumo_rlc_init(struct radeon_device *rdev) if (cs_data) { /* clear state block */ - if (rdev->family >= CHIP_TAHITI) { + if (rdev->family >= CHIP_BONAIRE) { + rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev); + } else if (rdev->family >= CHIP_TAHITI) { rdev->rlc.clear_state_size = si_get_csb_size(rdev); dws = rdev->rlc.clear_state_size + (256 / 4); } else { @@ -4014,7 +4021,9 @@ int sumo_rlc_init(struct radeon_device *rdev) } /* set up the cs buffer */ dst_ptr = rdev->rlc.cs_ptr; - if (rdev->family >= CHIP_TAHITI) { + if (rdev->family >= CHIP_BONAIRE) { + cik_get_csb_buffer(rdev, dst_ptr); + } else if (rdev->family >= CHIP_TAHITI) { reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256; dst_ptr[0] = upper_32_bits(reg_list_mc_addr); dst_ptr[1] = lower_32_bits(reg_list_mc_addr); -- cgit v1.2.3 From cf0ab2cd450357bd430c6799aed18a427a8420f6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 13:55:53 -0400 Subject: drm/radeon: handle cg in CI dpm code Clockgating needs to be disabled around certain parts of dpm setup otherwise the smc gets into a bad state and dpm doesn't work properly. 
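
The bracketing described here shows up directly in the diff that follows: coarse clockgating is switched off for the affected blocks, the SMC/dpm state is programmed, and gating is switched back on afterwards. Below is a minimal standalone C sketch of that bracket; the block flags and helper functions are illustrative stand-ins that only mirror the shape of the patch, not the real radeon entry points.

/* Illustrative sketch only: flags and helpers are stand-ins that mirror
 * the shape of the patch, not the real radeon driver API. */
#include <stdbool.h>
#include <stdio.h>

#define CG_BLOCK_GFX  (1u << 0)
#define CG_BLOCK_MC   (1u << 1)
#define CG_BLOCK_SDMA (1u << 2)
#define CG_BLOCK_BIF  (1u << 3)
#define CG_BLOCK_UVD  (1u << 4)
#define CG_BLOCK_HDP  (1u << 5)

static void update_cg(unsigned int blocks, bool enable)
{
        /* the real code pokes per-block clockgating registers here */
        printf("clockgating %s for blocks 0x%x\n",
               enable ? "enabled" : "disabled", blocks);
}

static int program_dpm(void)
{
        /* stand-in for the SMC/dpm programming the gating must bracket */
        printf("programming dpm tables and starting the SMC\n");
        return 0;
}

int main(void)
{
        const unsigned int blocks = CG_BLOCK_GFX | CG_BLOCK_MC |
                                    CG_BLOCK_SDMA | CG_BLOCK_BIF |
                                    CG_BLOCK_UVD | CG_BLOCK_HDP;
        int ret;

        update_cg(blocks, false);   /* quiesce gating first */
        ret = program_dpm();        /* SMC setup happens ungated */
        update_cg(blocks, true);    /* restore gating afterwards */

        return ret;
}
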
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index e4d9d50ce908..af04b5c28d96 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -155,6 +155,8 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, struct atom_voltage_table *voltage_table); extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); +extern void cik_update_cg(struct radeon_device *rdev, + u32 block, bool enable); static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, struct atom_voltage_table_entry *voltage_table, @@ -4492,6 +4494,13 @@ int ci_dpm_enable(struct radeon_device *rdev) struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; int ret; + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + if (ci_is_smc_running(rdev)) return -EINVAL; if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { @@ -4611,6 +4620,13 @@ int ci_dpm_enable(struct radeon_device *rdev) ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), true); + ci_update_current_ps(rdev, boot_ps); return 0; @@ -4621,6 +4637,12 @@ void ci_dpm_disable(struct radeon_device *rdev) struct ci_power_info *pi = ci_get_pi(rdev); struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + if (!ci_is_smc_running(rdev)) return; @@ -4649,6 +4671,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) struct radeon_ps *old_ps = &pi->current_rps; int ret; + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), false); + ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); if (pi->pcie_performance_request) ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); @@ -4710,6 +4739,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) return ret; } + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_UVD | + RADEON_CG_BLOCK_HDP), true); + return 0; } -- cgit v1.2.3 From 6500fc0c9fd9a0c3cde1b498541a259d1ba078ba Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 19:55:46 -0400 Subject: drm/radeon: handle cg in KB/KV dpm code Clockgating needs to be disabled around certain parts of dpm setup otherwise the smc gets into a bad state and dpm doesn't work properly. 
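
The KB/KV path in the diff that follows uses the same disable/program/re-enable bracket, just with a smaller block mask (GFX, SDMA, BIF and HDP). A small sketch of how such a mask can be decomposed, e.g. for debug logging; the bit positions and names are assumptions for illustration, not the driver's actual flag values.

/* Sketch: walk a clockgating block mask and name each set bit.
 * Flag positions and names are illustrative placeholders. */
#include <stdio.h>

static const char *const cg_block_names[] = {
        "GFX", "MC", "SDMA", "BIF", "UVD", "HDP",
};

static void describe_cg_mask(unsigned int mask)
{
        for (unsigned int i = 0;
             i < sizeof(cg_block_names) / sizeof(cg_block_names[0]); i++)
                if (mask & (1u << i))
                        printf("toggling clockgating for %s\n",
                               cg_block_names[i]);
}

int main(void)
{
        /* GFX | SDMA | BIF | HDP, as in the KB/KV bracket below */
        describe_cg_mask((1u << 0) | (1u << 2) | (1u << 3) | (1u << 5));
        return 0;
}
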
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/kv_dpm.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index ef6c901690da..7c7108f6417e 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1105,6 +1105,11 @@ int kv_dpm_enable(struct radeon_device *rdev) struct kv_power_info *pi = kv_get_pi(rdev); int ret; + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); + ret = kv_process_firmware_header(rdev); if (ret) { DRM_ERROR("kv_process_firmware_header failed\n"); @@ -1204,6 +1209,11 @@ int kv_dpm_enable(struct radeon_device *rdev) kv_dpm_powergate_vce(rdev, true); kv_dpm_powergate_uvd(rdev, true); + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), true); + kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); return ret; @@ -1211,6 +1221,11 @@ int kv_dpm_enable(struct radeon_device *rdev) void kv_dpm_disable(struct radeon_device *rdev) { + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); + kv_enable_smc_cac(rdev, false); kv_enable_didt(rdev, false); kv_clear_vc(rdev); @@ -1695,6 +1710,11 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) /*struct radeon_ps *old_ps = &pi->current_rps;*/ int ret; + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); + if (rdev->family == CHIP_KABINI) { if (pi->enable_dpm) { kv_set_valid_clock_range(rdev, new_ps); @@ -1750,6 +1770,12 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) kv_enable_nb_dpm(rdev); } } + + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), true); + rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; return 0; } -- cgit v1.2.3 From 773dc10a8acd28c19947b557094d4c1ec0043998 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Aug 2013 18:58:43 -0400 Subject: drm/radeon: enable mgcg on CIK Now that the CP is no longer reset and cg is properly disabled in when appropriate in the dpm code we can now enable mgcg (medium grained clockgating). 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 6152169d011f..630853b96841 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2439,7 +2439,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->num_crtc = 6; rdev->has_uvd = true; rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2464,7 +2464,7 @@ int radeon_asic_init(struct radeon_device *rdev) if (rdev->family == CHIP_KAVERI) { rdev->num_crtc = 4; rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | @@ -2492,7 +2492,7 @@ int radeon_asic_init(struct radeon_device *rdev) } else { rdev->num_crtc = 2; rdev->cg_flags = - /*RADEON_CG_SUPPORT_GFX_MGCG |*/ + RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | -- cgit v1.2.3 From e5b9e7503eb1f4884efa3b321d3cc47806779202 Mon Sep 17 00:00:00 2001 From: Tom Stellard Date: Fri, 16 Aug 2013 17:47:39 -0400 Subject: drm/radeon/si: Add support for CP DMA to CS checker for compute v2 Also add a new RADEON_INFO query to check that CP DMA packets are supported on the compute ring. CP DMA has been supported since the 3.8 kernel, but due to an oversight we forgot to teach the CS checker that the CP DMA packet was legal for the compute ring on Southern Islands GPUs. This patch fixes a bug where the radeon driver will incorrectly reject a legal CP DMA packet from user space. I would like to have the patch backported to stable so that we don't have to require Mesa users to use a bleeding edge kernel in order to take advantage of this feature which is already present in the stable kernels (3.8 and newer). v2: - Don't bump kms version, so this patch can be backported to stable kernels. 
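
The heart of the change below is factoring the CP_DMA validation out of the gfx checker into a helper that both the gfx and compute ring checkers call. The sketch below shows that shared-helper shape in standalone C; the field offsets (command at ib[idx + 4], info at ib[idx + 1]) mirror the patch, while the command bit positions and the register allow-list are simplified placeholders.

/* Sketch of sharing one packet checker between two ring parsers. */
#include <stdbool.h>
#include <stdio.h>

#define CMD_SAS  (1u << 26)     /* placeholder bit positions */
#define CMD_SAIC (1u << 28)

static bool reg_valid(unsigned int reg)
{
        /* the real code checks an allow-list of register ranges */
        return reg < 0x10000;
}

static int check_cp_dma(const unsigned int *ib, unsigned int idx)
{
        unsigned int command = ib[idx + 4];
        unsigned int info = ib[idx + 1];
        unsigned int start_reg = ib[idx] << 2;

        if ((command & CMD_SAS) && ((info >> 29) & 0x3) == 0) {
                unsigned int count = (command & CMD_SAIC) ?
                                     1 : (command & 0x1fffff);
                for (unsigned int i = 0; i < count; i++)
                        if (!reg_valid(start_reg + 4 * i))
                                return -1;      /* reject the packet */
        }
        return 0;
}

/* both ring checkers now call the same helper */
static int gfx_check(const unsigned int *ib, unsigned int idx)
{
        return check_cp_dma(ib, idx);
}

static int compute_check(const unsigned int *ib, unsigned int idx)
{
        return check_cp_dma(ib, idx);
}

int main(void)
{
        unsigned int ib[8] = { 0x100, 0, 0, 0, CMD_SAS | 2, 0, 0, 0 };

        printf("gfx: %d, compute: %d\n", gfx_check(ib, 0), compute_check(ib, 0));
        return 0;
}
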
Cc: stable@vger.kernel.org Signed-off-by: Tom Stellard Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_kms.c | 3 + drivers/gpu/drm/radeon/si.c | 106 +++++++++++++++++++++--------------- include/uapi/drm/radeon_drm.h | 2 + 3 files changed, 66 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 49ff3d1a6102..cc2ca380e0c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_SI_CP_DMA_COMPUTE: + *value = 1; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index d5f77eb23441..0b4e979b2cbf 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4051,13 +4051,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev, return 0; } +static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx) +{ + u32 start_reg, reg, i; + u32 command = ib[idx + 4]; + u32 info = ib[idx + 1]; + u32 idx_value = ib[idx]; + if (command & PACKET3_CP_DMA_CMD_SAS) { + /* src address space is register */ + if (((info & 0x60000000) >> 29) == 0) { + start_reg = idx_value << 2; + if (command & PACKET3_CP_DMA_CMD_SAIC) { + reg = start_reg; + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad SRC register\n"); + return -EINVAL; + } + } else { + for (i = 0; i < (command & 0x1fffff); i++) { + reg = start_reg + (4 * i); + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad SRC register\n"); + return -EINVAL; + } + } + } + } + } + if (command & PACKET3_CP_DMA_CMD_DAS) { + /* dst address space is register */ + if (((info & 0x00300000) >> 20) == 0) { + start_reg = ib[idx + 2]; + if (command & PACKET3_CP_DMA_CMD_DAIC) { + reg = start_reg; + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad DST register\n"); + return -EINVAL; + } + } else { + for (i = 0; i < (command & 0x1fffff); i++) { + reg = start_reg + (4 * i); + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad DST register\n"); + return -EINVAL; + } + } + } + } + } + return 0; +} + static int si_vm_packet3_gfx_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { + int r; u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, end_reg, reg, i; - u32 command, info; switch (pkt->opcode) { case PACKET3_NOP: @@ -4158,50 +4209,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev, } break; case PACKET3_CP_DMA: - command = ib[idx + 4]; - info = ib[idx + 1]; - if (command & PACKET3_CP_DMA_CMD_SAS) { - /* src address space is register */ - if (((info & 0x60000000) >> 29) == 0) { - start_reg = idx_value << 2; - if (command & PACKET3_CP_DMA_CMD_SAIC) { - reg = start_reg; - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); - return -EINVAL; - } - } else { - for (i = 0; i < (command & 0x1fffff); i++) { - reg = start_reg + (4 * i); - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); - return -EINVAL; - } - } - } - } - } - if (command & PACKET3_CP_DMA_CMD_DAS) { - /* dst address space is register */ - if (((info & 0x00300000) >> 20) == 0) { - start_reg = ib[idx + 2]; - if (command & PACKET3_CP_DMA_CMD_DAIC) { - reg = start_reg; - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); - return -EINVAL; - } - } else { - for (i = 0; i < (command & 0x1fffff); i++) { - reg = start_reg + (4 * i); 
- if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); - return -EINVAL; - } - } - } - } - } + r = si_vm_packet3_cp_dma_check(ib, idx); + if (r) + return r; break; default: DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); @@ -4213,6 +4223,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev, static int si_vm_packet3_compute_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { + int r; u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, reg, i; @@ -4285,6 +4296,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev, return -EINVAL; } break; + case PACKET3_CP_DMA: + r = si_vm_packet3_cp_dma_check(ib, idx); + if (r) + return r; + break; default: DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); return -EINVAL; diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 321d4ac5c512..fa8b3adf9ffb 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -979,6 +979,8 @@ struct drm_radeon_cs { #define RADEON_INFO_RING_WORKING 0x15 /* SI tile mode array */ #define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 +/* query if CP DMA is supported on the compute ring */ +#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 struct drm_radeon_info { -- cgit v1.2.3 From 0b31e02363b0db4e7931561bc6c141436e729d9f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Aug 2013 11:06:50 -0400 Subject: drm/radeon: update line buffer allocation for dce4.1/5 We need to allocate line buffer to each display when setting up the watermarks. Failure to do so can lead to a blank screen. This fixes blank screen problems on dce4.1/5 asics. Based on an initial fix from: Jay Cornwall Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/evergreen.c | 25 +++++++++++++++++++++---- drivers/gpu/drm/radeon/evergreend.h | 4 ++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 2ca9f13f2c79..183213689478 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1812,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -1835,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. 
*/ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } /* second controller of the pair uses second half of the lb */ if (radeon_crtc->crtc_id % 2) tmp += 4; WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 430997a70acc..8768fd6a1e27 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1160,6 +1160,10 @@ # define LATENCY_LOW_WATERMARK(x) ((x) << 0) # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ -- cgit v1.2.3 From 290d24576ccf1aa0373d2185cedfe262d0d4952a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Aug 2013 11:15:43 -0400 Subject: drm/radeon: update line buffer allocation for dce6 We need to allocate line buffer to each display when setting up the watermarks. Failure to do so can lead to a blank screen. This fixes blank screen problems on dce6 asics. Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=64850 Based on an initial fix from: Jay Cornwall Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/si.c | 23 +++++++++++++++++++---- drivers/gpu/drm/radeon/sid.h | 4 ++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 0b4e979b2cbf..89393ed593fa 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1711,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -1726,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. 
*/ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, DC_LB_MEMORY_CONFIG(tmp)); + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 91dae16fddc4..52d2ab6b67a0 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -282,6 +282,10 @@ #define DMIF_ADDR_CALC 0xC00 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define SRBM_STATUS 0xE50 #define GRBM_RQ_PENDING (1 << 5) #define VMC_BUSY (1 << 8) -- cgit v1.2.3 From bc01a8c7a24169f8b111b7dda6f5d8e7088309af Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Aug 2013 11:39:27 -0400 Subject: drm/radeon: update line buffer allocation for dce8 We need to allocate line buffer to each display when setting up the watermarks. Failure to do so can lead to a blank screen. This fixes blank screen problems on dce8 asics. Based on an initial fix from: Jay Cornwall Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/cik.c | 29 ++++++++++++++++++++++------- drivers/gpu/drm/radeon/cikd.h | 4 ++++ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 582f8e4f36d4..1942571496ea 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7367,8 +7367,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc, struct drm_display_mode *mode) { - u32 tmp; - + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 6 line buffers, one for each display controllers. @@ -7378,22 +7378,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev, * them using the stereo blender. */ if (radeon_crtc->base.enabled && mode) { - if (mode->crtc_hdisplay < 1920) + if (mode->crtc_hdisplay < 1920) { tmp = 1; - else if (mode->crtc_hdisplay < 2560) + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 2560) { tmp = 2; - else if (mode->crtc_hdisplay < 4096) + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 4096) { tmp = 0; - else { + buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4; + } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); tmp = 0; + buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 
2 : 4; } - } else + } else { tmp = 1; + buffer_alloc = 0; + } WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset, LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0)); + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 6a92f491cb91..203d2a09a1f5 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -393,6 +393,10 @@ #define DMIF_ADDR_CALC 0xC00 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define SRBM_GFX_CNTL 0xE44 #define PIPEID(x) ((x) << 0) #define MEID(x) ((x) << 2) -- cgit v1.2.3 From 9cb84ab0abd1f5af2482387f8ff585dcfa30b9e6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Aug 2013 19:06:19 -0400 Subject: drm/radeon: check firmware overrides for mclk/sclk ss Check the overrides in the firmware info table before enabling spread spectrum on the engine or memory clocks. Some boards may have valid spread spectrum tables, but shouldn't necessarily have it enabled. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_atombios.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 7ba439e9f30f..4ac5f4027620 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1480,6 +1480,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, uint8_t frev, crev; int i, num_indices; + if (id == ASIC_INTERNAL_MEMORY_SS) { + if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT)) + return false; + } + if (id == ASIC_INTERNAL_ENGINE_SS) { + if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT)) + return false; + } + memset(ss, 0, sizeof(struct radeon_atom_ss)); if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { -- cgit v1.2.3 From 95663948ba22a4be8b99acd67fbf83e86ddffba4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 14:59:01 -0400 Subject: drm/radeon: fix LCD record parsing If the LCD table contains an EDID record, properly account for the edid size when walking through the records. This should fix error messages about unknown LCD records. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_atombios.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 4ac5f4027620..112c96352562 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1681,7 +1681,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct kfree(edid); } } - record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); + record += fake_edid_record->ucFakeEDIDLength ? 
+ fake_edid_record->ucFakeEDIDLength + 2 : + sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; -- cgit v1.2.3 From d1e3b5564834ea24dee6b364a172365f64865fe5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 21 Aug 2013 13:48:12 -0400 Subject: drm/radeon: atombios hw i2c fixes These fixes make writes work properly. Previously only reads worked. Note that this feature is off by default. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_i2c.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 2ca389d19258..deaf98cdca3a 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -32,7 +32,7 @@ extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); #define TARGET_HW_I2C_CLOCK 50 /* these are a limitation of ProcessI2cChannelTransaction not the hw */ -#define ATOM_MAX_HW_I2C_WRITE 2 +#define ATOM_MAX_HW_I2C_WRITE 3 #define ATOM_MAX_HW_I2C_READ 255 static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, @@ -52,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, if (flags & HW_I2C_WRITE) { if (num > ATOM_MAX_HW_I2C_WRITE) { - DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num); + DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); return -EINVAL; } - memcpy(&out, buf, num); + args.ucRegIndex = buf[0]; + if (num > 1) + memcpy(&out, &buf[1], num - 1); args.lpI2CDataOut = cpu_to_le16(out); } else { if (num > ATOM_MAX_HW_I2C_READ) { DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); return -EINVAL; } + args.ucRegIndex = 0; + args.lpI2CDataOut = 0; } + args.ucFlag = flags; args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; - args.ucRegIndex = 0; args.ucTransBytes = num; args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; -- cgit v1.2.3 From ac4d04d4be51802e7bf6a46a987e47fdc32af6b4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 21 Aug 2013 14:44:15 -0400 Subject: drm/radeon: disable the GRPH block when we disable the crtc Since we aren't using it when the crtc is disabled, turn it off to save power. The GRPH block is the part of the display controller that controls the primary graphics plane (size, address, etc.). Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_crtc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b9d3b43f19c0..bf87f6d435f8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + /* disable the GRPH */ + if (ASIC_IS_DCE4(rdev)) + WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0); + else if (ASIC_IS_AVIVO(rdev)) + WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0); + if (ASIC_IS_DCE6(rdev)) atombios_powergate_crtc(crtc, ATOM_ENABLE); -- cgit v1.2.3 From 9597fe1e6aa4a1626502b03770255345ec5a2ed7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 23 Aug 2013 11:06:12 -0400 Subject: drm/radeon: enable uvd dpm on CI UVD dpm dynamically adjusts the uvd clocks on demand. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index af04b5c28d96..4ba1fd4918e9 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5073,6 +5073,8 @@ int ci_dpm_init(struct radeon_device *rdev) pi->caps_sclk_throttle_low_notification = false; + pi->caps_uvd_dpm = true; + ci_get_leakage_voltages(rdev); ci_patch_dependency_tables_with_leakage(rdev); ci_set_private_data_variables_based_on_pptable(rdev); -- cgit v1.2.3 From 47acb1ff9b12b7121a9f8d7589d9ad88dafa603c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 26 Aug 2013 09:43:24 -0400 Subject: drm/radeon/dpm: track uvd gated state for ci Track the current uvd gated state on CI to avoid unnecessary state changes when uvd is active. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 13 +++++++++++++ drivers/gpu/drm/radeon/ci_dpm.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 4ba1fd4918e9..7a6068968b70 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -681,6 +681,13 @@ static int ci_power_control_set_level(struct radeon_device *rdev) void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) { + struct ci_power_info *pi = ci_get_pi(rdev); + + if (pi->uvd_power_gated == gate) + return; + + pi->uvd_power_gated = gate; + ci_update_uvd_dpm(rdev, gate); } @@ -4620,6 +4627,8 @@ int ci_dpm_enable(struct radeon_device *rdev) ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + ci_dpm_powergate_uvd(rdev, true); + cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC | RADEON_CG_BLOCK_SDMA | @@ -4643,6 +4652,8 @@ void ci_dpm_disable(struct radeon_device *rdev) RADEON_CG_BLOCK_UVD | RADEON_CG_BLOCK_HDP), false); + ci_dpm_powergate_uvd(rdev, false); + if (!ci_is_smc_running(rdev)) return; @@ -5163,6 +5174,8 @@ int ci_dpm_init(struct radeon_device *rdev) pi->caps_dynamic_ac_timing = true; + pi->uvd_power_gated = false; + return 0; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h index de504b5ac33f..93bbed977ffb 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.h +++ b/drivers/gpu/drm/radeon/ci_dpm.h @@ -279,6 +279,7 @@ struct ci_power_info { bool enable_pkg_pwr_tracking_feature; bool use_pcie_performance_levels; bool use_pcie_powersaving_levels; + bool uvd_power_gated; /* driver states */ struct radeon_ps current_rps; struct ci_ps current_ps; -- cgit v1.2.3 From 39c88ae314b0ec8084ccafdee416d8b7ff736d32 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 26 Aug 2013 09:46:51 -0400 Subject: drm/radeon/dpm: ungate blocks in dpm disable for kb/kv These blocks need to be ungated for the other parts of the driver properly initialize them (e.g., after a gpu reset, etc.). 
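
In other words, teardown has to hand the blocks back in an ungated, powered-up state so that a later re-init starts from something sane. The sketch below captures that ordering with stand-in function names; it is not the real kv_dpm code, just the shape of the disable path the diff below establishes.

/* Sketch of the teardown ordering: power the IP blocks back up before
 * the dpm/SMC state is torn down, so a later re-init (e.g. after a GPU
 * reset) starts from a sane state. Names are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static void powergate_block(const char *name, bool gate)
{
        printf("%s %s\n", gate ? "powergating" : "powering up", name);
}

static void dpm_disable(void)
{
        /* power up everything dpm may have gated while it was running */
        powergate_block("ACP", false);
        powergate_block("SAMU", false);
        powergate_block("VCE", false);
        powergate_block("UVD", false);

        /* only now stop the SMC-side dpm machinery */
        printf("stopping dpm/SMC\n");
}

int main(void)
{
        dpm_disable();
        return 0;
}
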
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/kv_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 7c7108f6417e..a10207783065 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1226,6 +1226,12 @@ void kv_dpm_disable(struct radeon_device *rdev) RADEON_CG_BLOCK_BIF | RADEON_CG_BLOCK_HDP), false); + /* powerup blocks */ + kv_dpm_powergate_acp(rdev, false); + kv_dpm_powergate_samu(rdev, false); + kv_dpm_powergate_vce(rdev, false); + kv_dpm_powergate_uvd(rdev, false); + kv_enable_smc_cac(rdev, false); kv_enable_didt(rdev, false); kv_clear_vc(rdev); -- cgit v1.2.3 From b2e4c70a9747ecb618d563b004ba746869dde5aa Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Jun 2013 15:18:26 -0400 Subject: drm/radeon: fill in gpu_init for berlin GPU cores This fills in the GPU specific details for berlin GPU cores so that the driver will work with them. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/cik.c | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 1942571496ea..e336a31230ea 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2708,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev) gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_KAVERI: - /* TODO */ + rdev->config.cik.max_shader_engines = 1; + rdev->config.cik.max_tile_pipes = 4; + if ((rdev->pdev->device == 0x1304) || + (rdev->pdev->device == 0x1305) || + (rdev->pdev->device == 0x130C) || + (rdev->pdev->device == 0x130F) || + (rdev->pdev->device == 0x1310) || + (rdev->pdev->device == 0x1311) || + (rdev->pdev->device == 0x131C)) { + rdev->config.cik.max_cu_per_sh = 8; + rdev->config.cik.max_backends_per_se = 2; + } else if ((rdev->pdev->device == 0x1309) || + (rdev->pdev->device == 0x130A) || + (rdev->pdev->device == 0x130D) || + (rdev->pdev->device == 0x1313)) { + rdev->config.cik.max_cu_per_sh = 6; + rdev->config.cik.max_backends_per_se = 2; + } else if ((rdev->pdev->device == 0x1306) || + (rdev->pdev->device == 0x1307) || + (rdev->pdev->device == 0x130B) || + (rdev->pdev->device == 0x130E) || + (rdev->pdev->device == 0x1315) || + (rdev->pdev->device == 0x131B)) { + rdev->config.cik.max_cu_per_sh = 4; + rdev->config.cik.max_backends_per_se = 1; + } else { + rdev->config.cik.max_cu_per_sh = 3; + rdev->config.cik.max_backends_per_se = 1; + } + rdev->config.cik.max_sh_per_se = 1; + rdev->config.cik.max_texture_channel_caches = 4; + rdev->config.cik.max_gprs = 256; + rdev->config.cik.max_gs_threads = 16; + rdev->config.cik.max_hw_contexts = 8; + + rdev->config.cik.sc_prim_fifo_size_frontend = 0x20; + rdev->config.cik.sc_prim_fifo_size_backend = 0x100; + rdev->config.cik.sc_hiz_tile_fifo_size = 0x30; + rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_KABINI: default: -- cgit v1.2.3 From 0431b2742f8e7755f3bbf5924900d12973412e94 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Jun 2013 15:51:21 -0400 Subject: drm/radeon: add berlin pci ids This adds the pci ids for the berlin GPU core. 
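
Each entry in the table below encodes the chip family and feature bits (mobility, IGP, memory map) in the PCI ID table's driver_data field. As a rough illustration, a lookup over such a table might look like the standalone sketch below; the device IDs and flag bits shown are placeholders, not the full radeon set.

/* Sketch: look up per-device flags in a PCI-ID-style table.
 * IDs and flag bits are illustrative, not the real radeon values. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_IS_MOBILITY (1u << 0)
#define FLAG_IS_IGP      (1u << 1)

struct id_entry {
        uint16_t vendor;
        uint16_t device;
        uint32_t flags;         /* family + feature bits, as in driver_data */
};

static const struct id_entry ids[] = {
        { 0x1002, 0x1304, FLAG_IS_MOBILITY | FLAG_IS_IGP },
        { 0x1002, 0x1305, FLAG_IS_IGP },
        { 0, 0, 0 },            /* terminator */
};

static const struct id_entry *lookup(uint16_t vendor, uint16_t device)
{
        for (const struct id_entry *e = ids; e->vendor; e++)
                if (e->vendor == vendor && e->device == device)
                        return e;
        return NULL;
}

int main(void)
{
        const struct id_entry *e = lookup(0x1002, 0x1304);

        if (e)
                printf("found device, flags=0x%x\n", e->flags);
        return 0;
}
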
Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- include/drm/drm_pciids.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 34efaf64cc87..78bc8041a8a1 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -1,4 +1,22 @@ #define radeon_PCI_IDS \ + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ -- cgit v1.2.3 From acf88deb8ddbb73acd1c3fa32fde51af9153227f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 26 Aug 2013 17:52:12 -0400 Subject: drm/radeon: fix resume on some rs4xx boards (v2) Setting MC_MISC_CNTL.GART_INDEX_REG_EN causes hangs on some boards on resume. The systems seem to work fine without touching this bit so leave it as is. v2: read-modify-write the GART_INDEX_REG_EN bit. I suspect the problem is that we are losing the other settings in the register. 
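
The v2 note is the crux: rather than writing an absolute value to RS480_MC_MISC_CNTL and clobbering whatever else the register holds, the fix reads the register, ORs in the needed bits, and writes it back. Below is a generic read-modify-write sketch of that pattern, with the MMIO access reduced to a plain variable and the bit positions chosen only for illustration.

/* Read-modify-write sketch: set bits without clobbering the rest of a
 * register. The "register" here is a variable standing in for MMIO. */
#include <stdint.h>
#include <stdio.h>

#define GART_INDEX_REG_EN (1u << 0)     /* illustrative bit positions */
#define BLOCK_GFX_D3_EN   (1u << 1)

static uint32_t mc_misc_cntl = 0x00a50000;      /* pretend hardware default */

static uint32_t reg_read(void)        { return mc_misc_cntl; }
static void     reg_write(uint32_t v) { mc_misc_cntl = v; }

int main(void)
{
        uint32_t tmp;

        /* Pre-fix approach: an absolute write loses the default bits. */
        /* reg_write(GART_INDEX_REG_EN); */

        /* Fixed approach: preserve whatever the register already holds. */
        tmp = reg_read();
        tmp |= GART_INDEX_REG_EN | BLOCK_GFX_D3_EN;
        reg_write(tmp);

        printf("MC_MISC_CNTL = 0x%08x\n", (unsigned)reg_read());
        return 0;
}
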
fixes: https://bugs.freedesktop.org/show_bug.cgi?id=52952 Reported-by: Ondrej Zary Tested-by: Daniel Tobias Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/rs400.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 233a9b9fa1f7..b8074a8ec75a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev) /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0, * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { - WREG32_MC(RS480_MC_MISC_CNTL, - (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } else { - WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } /* Enable gart */ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); -- cgit v1.2.3 From fb93df1c2d8b3b1fb16d6ee9e32554e0c038815d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Aug 2013 12:36:01 -0400 Subject: drm/radeon: fix handling of variable sized arrays for router objects The table has the following format: typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure { UCHAR ucNumberOfSrc; USHORT usSrcObjectID[1]; UCHAR ucNumberOfDst; USHORT usDstObjectID[1]; }ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; usSrcObjectID[] and usDstObjectID[] are variably sized, so we can't access them directly. Use pointers and update the offset appropriately when accessing the Dst members. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_atombios.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 112c96352562..ad913542ec1a 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -711,13 +711,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); + u8 *num_dst_objs = (u8 *) + ((u8 *)router_src_dst_table + 1 + + (router_src_dst_table->ucNumberOfSrc * 2)); + u16 *dst_objs = (u16 *)(num_dst_objs + 1); int enum_id; router.router_id = router_obj_id; - for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; - enum_id++) { + for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { if (le16_to_cpu(path->usConnObjectId) == - le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) + le16_to_cpu(dst_objs[enum_id])) break; } -- cgit v1.2.3 From c1cbee0ec0697c531778fbaf34aa358c0f5ef00e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Aug 2013 10:51:04 -0400 Subject: drm/radeon/audio: set up the sads on DCE3.2 asics This sets up the short audio descriptors properly on DCE3.2 asics for hdmi audio. 
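
The diff below walks the connector's EDID short audio descriptors (SADs) and, for each supported coding type, packs the channel count, descriptor byte 2 and the supported-frequency bitmap into a single register value. Here is a standalone sketch of that packing, assuming the same field layout as the patch's MAX_CHANNELS/DESCRIPTOR_BYTE_2/SUPPORTED_FREQUENCIES macros.

/* Sketch of packing a CEA short audio descriptor into the register
 * layout used by the patch: channels in bits 0-2, frequency bitmap in
 * bits 8-15, descriptor byte 2 in bits 16-23, stereo frequencies in
 * bits 24-31 (LPCM only). */
#include <stdint.h>
#include <stdio.h>

struct cea_sad_fields {
        uint8_t channels;       /* channel count minus one */
        uint8_t byte2;          /* format-dependent byte 2 */
        uint8_t freq;           /* supported sample-rate bitmap */
};

static uint32_t pack_sad(const struct cea_sad_fields *sad, int is_lpcm)
{
        uint32_t value = (uint32_t)(sad->channels & 0x7) |
                         ((uint32_t)sad->freq << 8) |
                         ((uint32_t)sad->byte2 << 16);

        if (is_lpcm)            /* stereo rates only defined for LPCM */
                value |= (uint32_t)sad->freq << 24;
        return value;
}

int main(void)
{
        /* 8-channel LPCM, 32/44.1/48 kHz, 16/20/24-bit samples */
        struct cea_sad_fields sad = { .channels = 7, .byte2 = 0x07, .freq = 0x07 };

        printf("descriptor register value: 0x%08x\n",
               (unsigned)pack_sad(&sad, 1));
        return 0;
}
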
Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_hdmi.c | 66 +++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/radeon/r600d.h | 29 +++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 6d7128d02493..f443010ce90b 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -322,6 +322,68 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) kfree(sadb); } +static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + struct cea_sad *sads; + int i, sad_count; + + static const u16 eld_reg_to_type[][2] = { + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + radeon_connector = to_radeon_connector(connector); + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); + if (sad_count < 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 value = 0; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + value = MAX_CHANNELS(sad->channels) | + DESCRIPTOR_BYTE_2(sad->byte2) | + SUPPORTED_FREQUENCIES(sad->freq); + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq); + break; + } + } + WREG32(eld_reg_to_type[i][0], value); + } + + kfree(sads); +} + /* * update the info frames with the data from the current display mode */ @@ -366,8 +428,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ } - if (ASIC_IS_DCE32(rdev)) + if (ASIC_IS_DCE32(rdev)) { dce3_2_afmt_write_speaker_allocation(encoder); + dce3_2_afmt_write_sad_regs(encoder); + } WREG32(HDMI0_ACR_PACKET_CONTROL + offset, HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */ diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 44ec7a148c3d..454f90a849e4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -967,6 +967,35 @@ #define HDMI_CONNECTION (1 << 16) #define DP_CONNECTION (1 << 17) +#define 
AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */ +# define MAX_CHANNELS(x) (((x) & 0x7) << 0) +/* max channels minus one. 7 = 8 channels */ +# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) +# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) +# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ +/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO + * bit0 = 32 kHz + * bit1 = 44.1 kHz + * bit2 = 48 kHz + * bit3 = 88.2 kHz + * bit4 = 96 kHz + * bit5 = 176.4 kHz + * bit6 = 192 kHz + */ + /* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly * different due to the new DIG blocks, but also have 2 instances. -- cgit v1.2.3 From 27c505ca84e164ec66ad55dcf3f5befaac83f10a Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 29 Aug 2013 12:29:35 +0300 Subject: radeon kms: fix uninitialised hotplug work usage in r100_irq_process() Commit a01c34f72e7cd2624570818f579b5ab464f93de2 (radeon kms: do not flush uninitialized hotplug work) moved work initialisation phase to the last step of radeon_irq_kms_init(). Meelis Roos reported that this causes problems on his machine because drm_irq_install() uses hotplug work on r100. hotplug work flushed in radeon_irq_kms_fini(), with two possible cases: -- radeon_irq_kms_fini() call after successful radeon_irq_kms_init() -- radeon_irq_kms_fini() call after unsuccessful (or not called at all) radeon_irq_kms_init() The latter one causes flush work on uninitialised hotplug work. Move work initialisation before drm_irq_install(), but keep existing agreement to flush hotplug work in radeon_irq_kms_fini() only for `irq.installed' (successful radeon_irq_kms_init()) case. WARNING: CPU: 0 PID: 243 at kernel/workqueue.c:1378 __queue_work+0x132/0x16d() Call Trace: [] ? dump_stack+0xa/0x13 [] ? warn_slowpath_common+0x75/0x8a [] ? __queue_work+0x132/0x16d [] ? __queue_work+0x132/0x16d [] ? warn_slowpath_null+0x1b/0x1f [] ? __queue_work+0x132/0x16d [] ? queue_work_on+0x30/0x40 [] ? r100_irq_process+0x16d/0x1e6 [radeon] [] ? radeon_driver_irq_preinstall_kms+0xc2/0xc5 [radeon] [] ? drm_irq_install+0xb2/0x1ac [drm] [] ? drm_vblank_init+0x196/0x1d2 [drm] [] ? radeon_irq_kms_init+0x33/0xc6 [radeon] [] ? r100_startup+0x1a3/0x1d6 [radeon] [] ? radeon_ttm_init+0x26e/0x287 [radeon] [] ? r100_init+0x2b3/0x309 [radeon] [] ? vga_client_register+0x39/0x40 [] ? radeon_device_init+0x54b/0x61b [radeon] [] ? cail_mc_write+0x13/0x13 [radeon] [] ? radeon_driver_load_kms+0x82/0xda [radeon] [] ? 
drm_get_pci_dev+0x136/0x22d [drm] [] ? radeon_pci_probe+0x6c/0x86 [radeon] [] ? pci_device_probe+0x4c/0x83 [] ? driver_probe_device+0x80/0x184 [] ? pci_match_id+0x18/0x36 [] ? __driver_attach+0x44/0x5f [] ? bus_for_each_dev+0x50/0x5a [] ? driver_attach+0x14/0x16 [] ? __device_attach+0x28/0x28 [] ? bus_add_driver+0xd6/0x1bf [] ? driver_register+0x78/0xcf [] ? 0xf8ba7fff [] ? do_one_initcall+0x8b/0x121 [] ? change_page_attr_clear+0x2e/0x33 [] ? 0xf8ba7fff [] ? set_memory_ro+0x1c/0x20 [] ? set_page_attributes+0x11/0x12 [] ? load_module+0x12fa/0x17e8 [] ? map_vm_area+0x22/0x31 [] ? SyS_init_module+0x67/0x7d [] ? sysenter_do_call+0x12/0x26 Reported-by: Meelis Roos Tested-by: Meelis Roos Signed-off-by: Sergey Senozhatsky Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_irq_kms.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 081886b0642d..cc9e8482cf30 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev) dev_info(rdev->dev, "radeon: using MSI.\n"); } } + + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); + INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); + rdev->irq.installed = true; r = drm_irq_install(rdev->ddev); if (r) { rdev->irq.installed = false; + flush_work(&rdev->hotplug_work); return r; } - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); - DRM_INFO("radeon: irq initialized.\n"); return 0; } -- cgit v1.2.3 From 2ce529dac71ae7753981a587932d074fdb248608 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 28 Aug 2013 18:12:59 -0400 Subject: drm/radeon: split out radeon_uvd_resume from uvd_v4_2_resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For powergating, we just need to re-init the registers, there is no need to restore the uvd BOs. This just adds needless work when powergating uvd for playback while the system is on. We only need to restore the uvd BOs on an actual resume from suspend or when the driver loads. This fixes multi-stream UVD playback on KB systems. 
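
The reasoning boils down to splitting one routine in two: restoring the UVD BO contents (only needed on driver load or a real suspend/resume) and reprogramming the VCPU registers (needed every time the block comes back from powergating). A sketch of the resulting call structure, with stand-in names rather than the real radeon_uvd_resume/uvd_v4_2_resume split:

/* Sketch of splitting "resume" work into a BO restore (rare) and a
 * register re-init (every ungate). Names are illustrative. */
#include <stdio.h>

static int restore_uvd_bo(void)
{
        /* expensive: copy firmware/state back into the UVD BO */
        printf("restoring UVD BO contents\n");
        return 0;
}

static int reinit_uvd_registers(void)
{
        /* cheap: reprogram the VCPU memory controller registers */
        printf("reprogramming UVD registers\n");
        return 0;
}

static int driver_resume(void)  /* system resume / driver load */
{
        int r = restore_uvd_bo();

        return r ? r : reinit_uvd_registers();
}

static int uvd_ungate(void)     /* leaving powergating during playback */
{
        return reinit_uvd_registers();  /* BO contents were never lost */
}

int main(void)
{
        driver_resume();
        uvd_ungate();
        return 0;
}
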
Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/cik.c | 13 ++++++++----- drivers/gpu/drm/radeon/uvd_v4_2.c | 5 ----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e336a31230ea..79124f81c00e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7051,12 +7051,15 @@ static int cik_startup(struct radeon_device *rdev) return r; } - r = uvd_v4_2_resume(rdev); + r = radeon_uvd_resume(rdev); if (!r) { - r = radeon_fence_driver_start_ring(rdev, - R600_RING_TYPE_UVD_INDEX); - if (r) - dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + r = uvd_v4_2_resume(rdev); + if (!r) { + r = radeon_fence_driver_start_ring(rdev, + R600_RING_TYPE_UVD_INDEX); + if (r) + dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + } } if (r) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c index d7e480786098..d04d5073eef2 100644 --- a/drivers/gpu/drm/radeon/uvd_v4_2.c +++ b/drivers/gpu/drm/radeon/uvd_v4_2.c @@ -39,11 +39,6 @@ int uvd_v4_2_resume(struct radeon_device *rdev) { uint64_t addr; uint32_t size; - int r; - - r = radeon_uvd_resume(rdev); - if (r) - return r; /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; -- cgit v1.2.3 From a7f28f0f55ce484ef6047fa1f42d57daaeb1b634 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 28 Aug 2013 18:24:00 -0400 Subject: drm/radeon: check the return value of uvd_v1_0_start in uvd_v1_0_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to try the ring tests if starting the UVD block failed. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/uvd_v1_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 76ca669f0c8e..3426be9aa38a 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -85,7 +85,9 @@ int uvd_v1_0_init(struct radeon_device *rdev) /* raise clocks while booting up the VCPU */ radeon_set_uvd_clocks(rdev, 53300, 40000); - uvd_v1_0_start(rdev); + r = uvd_v1_0_start(rdev); + if (r) + goto done; ring->ready = true; r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); -- cgit v1.2.3 From f30df435ac6136787e65646881e62f12df2d71f6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 28 Aug 2013 18:46:01 -0400 Subject: drm/radeon/dpm: only need to reprogram uvd if uvd pg is enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid needless uvd reprogramming if uvd powergating is disabled. 
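
Concretely, the diff below only stops and restarts the UVD block when powergating is actually supported; otherwise the block keeps running and only the dpm state is updated. A tiny sketch of that guard, with the flag and helpers as stand-ins:

/* Sketch: skip the stop/start dance when powergating is not enabled. */
#include <stdbool.h>
#include <stdio.h>

static bool caps_uvd_pg = false;        /* powergating support flag */

static void powergate_uvd(bool gate)
{
        if (caps_uvd_pg) {
                /* only stop/restart the block if it will really be gated */
                printf("%s UVD block\n", gate ? "stopping" : "restarting");
        }
        printf("updating UVD dpm state (gate=%d)\n", gate);
}

int main(void)
{
        powergate_uvd(true);
        powergate_uvd(false);
        return 0;
}
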
Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/kv_dpm.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index a10207783065..15a6f67813d7 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1491,17 +1491,20 @@ void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) pi->uvd_power_gated = gate; if (gate) { - uvd_v1_0_stop(rdev); - cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); + if (pi->caps_uvd_pg) { + uvd_v1_0_stop(rdev); + cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); + } kv_update_uvd_dpm(rdev, gate); if (pi->caps_uvd_pg) kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF); } else { - if (pi->caps_uvd_pg) + if (pi->caps_uvd_pg) { kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON); - uvd_v4_2_resume(rdev); - uvd_v1_0_start(rdev); - cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); + uvd_v4_2_resume(rdev); + uvd_v1_0_start(rdev); + cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); + } kv_update_uvd_dpm(rdev, gate); } } -- cgit v1.2.3 From e5903d399a7b0e5c14673c1206f4aeec2859c730 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Aug 2013 08:58:20 -0400 Subject: drm/radeon: fix init ordering for r600+ The vram scratch buffer needs to be initialized before the mc is programmed otherwise we program 0 as the GPU address of the default GPU fault page. In most cases we put vram at zero anyway and reserve a page for the legacy vga buffer so in practice this shouldn't cause any problems, but better to make it correct. Was changed in: 6fab3febf6d949b0a12b1e4e73db38e4a177a79e Reported-by: FrankR Huang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/cik.c | 9 +++++---- drivers/gpu/drm/radeon/evergreen.c | 9 +++++---- drivers/gpu/drm/radeon/ni.c | 9 +++++---- drivers/gpu/drm/radeon/r600.c | 9 +++++---- drivers/gpu/drm/radeon/rv770.c | 9 +++++---- drivers/gpu/drm/radeon/si.c | 9 +++++---- 6 files changed, 30 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 79124f81c00e..148c539684bb 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6951,6 +6951,11 @@ static int cik_startup(struct radeon_device *rdev) /* enable aspm */ cik_program_aspm(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + cik_mc_program(rdev); if (rdev->flags & RADEON_IS_IGP) { @@ -6980,10 +6985,6 @@ static int cik_startup(struct radeon_device *rdev) } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - r = cik_pcie_gart_enable(rdev); if (r) return r; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 183213689478..6398c1f76fb8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5053,6 +5053,11 @@ static int evergreen_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + evergreen_mc_program(rdev); if (ASIC_IS_DCE5(rdev)) { @@ -5078,10 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev) } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - if (rdev->flags & RADEON_IS_AGP) { evergreen_agp_enable(rdev); } else { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 69499fff06b0..d60049efd7ac 
100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1863,6 +1863,11 @@ static int cayman_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + evergreen_mc_program(rdev); if (rdev->flags & RADEON_IS_IGP) { @@ -1889,10 +1894,6 @@ static int cayman_startup(struct radeon_device *rdev) } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - r = cayman_pcie_gart_enable(rdev); if (r) return r; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 087cff444ba2..b72d4d717a72 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2698,6 +2698,11 @@ static int r600_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ r600_pcie_gen2_enable(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + r600_mc_program(rdev); if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { @@ -2708,10 +2713,6 @@ static int r600_startup(struct radeon_device *rdev) } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); } else { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b811296462a3..9f5846743c9e 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1658,6 +1658,11 @@ static int rv770_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ rv770_pcie_gen2_enable(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + rv770_mc_program(rdev); if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { @@ -1668,10 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev) } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - if (rdev->flags & RADEON_IS_AGP) { rv770_agp_enable(rdev); } else { diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 89393ed593fa..fe8bca686900 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6343,6 +6343,11 @@ static int si_startup(struct radeon_device *rdev) /* enable aspm */ si_program_aspm(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + si_mc_program(rdev); if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || @@ -6360,10 +6365,6 @@ static int si_startup(struct radeon_device *rdev) return r; } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - r = si_pcie_gart_enable(rdev); if (r) return r; -- cgit v1.2.3 From 6a3808b8233eb91b57c230cf1161ac116a189ffd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 30 Aug 2013 11:10:33 +0200 Subject: drm/radeon: enable UVD interrupts on CIK MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same as on evergreen. 
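
The change that follows adds interrupt source 124 (UVD) to the CIK interrupt handler's dispatch switch so that UVD fences get processed, matching evergreen. Below is a stripped-down sketch of that kind of source-ID dispatch; the VM-fault sources (146/147) come from the surrounding handler, while the stub handlers are assumptions for illustration.

/* Sketch of dispatching on an interrupt source ID, with the UVD case
 * added. Handlers are stubs. */
#include <stdio.h>

static void process_fence(const char *ring)
{
        printf("fence signalled on %s ring\n", ring);
}

static void handle_irq(unsigned int src_id, unsigned int src_data)
{
        switch (src_id) {
        case 124:       /* UVD */
                printf("UVD int: 0x%08x\n", src_data);
                process_fence("uvd");
                break;
        case 146:       /* VM protection fault, as in the handler below */
        case 147:
                printf("VM protection fault\n");
                break;
        default:
                printf("unhandled interrupt source %u\n", src_id);
                break;
        }
}

int main(void)
{
        handle_irq(124, 0x1);
        handle_irq(42, 0);
        return 0;
}
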
Signed-off-by: Christian König Reported-by: FrankR Huang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/cik.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 148c539684bb..a77b593185fb 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6733,6 +6733,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); -- cgit v1.2.3 From 607f2c2791ec81e5abca6213ff037e9405378be1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 18:40:46 -0400 Subject: drm/radeon: gcc fixes for radeon_atombios.c Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_atombios.c | 42 +++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ad913542ec1a..404e25d285ba 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); + gpio = &i2c_info->asGPIO_Info[0]; for (i = 0; i < num_indices; i++) { - gpio = &i2c_info->asGPIO_Info[i]; radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); @@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); break; } + gpio = (ATOM_GPIO_I2C_ASSIGMENT *) + ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); } } @@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); + gpio = &i2c_info->asGPIO_Info[0]; for (i = 0; i < num_indices; i++) { - gpio = &i2c_info->asGPIO_Info[i]; - radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); @@ -206,6 +207,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } + gpio = (ATOM_GPIO_I2C_ASSIGMENT *) + ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); } } } @@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); + pin = gpio_info->asGPIO_Pin; for (i = 0; i < num_indices; i++) { - pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = pin->ucGPIO_ID; gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; @@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, gpio.valid = true; break; } + pin = (ATOM_GPIO_PIN_ASSIGNMENT *) + ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT)); } } @@ -3413,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev, ATOM_VOLTAGE_FORMULA_V2 *formula = &voltage_object->v2.asFormula; if 
(formula->ucNumOfVoltageEntries) { + VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *) + ((u8 *)&formula->asVIDAdjustEntries[0] + + (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1))); *max_voltage = - le16_to_cpu(formula->asVIDAdjustEntries[ - formula->ucNumOfVoltageEntries - 1 - ].usVoltageValue); + le16_to_cpu(lut->usVoltageValue); return 0; } } @@ -3576,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev, if (voltage_object) { ATOM_VOLTAGE_FORMULA_V2 *formula = &voltage_object->v2.asFormula; + VOLTAGE_LUT_ENTRY *lut; if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES) return -EINVAL; + lut = &formula->asVIDAdjustEntries[0]; for (i = 0; i < formula->ucNumOfVoltageEntries; i++) { voltage_table->entries[i].value = - le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue); + le16_to_cpu(lut->usVoltageValue); ret = radeon_atom_get_voltage_gpio_settings(rdev, voltage_table->entries[i].value, voltage_type, @@ -3588,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev, &voltage_table->mask_low); if (ret) return ret; + lut = (VOLTAGE_LUT_ENTRY *) + ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY)); } voltage_table->count = formula->ucNumOfVoltageEntries; return 0; @@ -3607,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev, if (voltage_object) { ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio = &voltage_object->v3.asGpioVoltageObj; + VOLTAGE_LUT_ENTRY_V2 *lut; if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES) return -EINVAL; + lut = &gpio->asVolGpioLut[0]; for (i = 0; i < gpio->ucGpioEntryNum; i++) { voltage_table->entries[i].value = - le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue); + le16_to_cpu(lut->usVoltageValue); voltage_table->entries[i].smio_low = - le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId); + le32_to_cpu(lut->ulVoltageId); + lut = (VOLTAGE_LUT_ENTRY_V2 *) + ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2)); } voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal); voltage_table->count = gpio->ucGpioEntryNum; @@ -3739,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev, union vram_info *vram_info; u32 mem_timing_size = gddr5 ? 
sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT); - u8 *p; memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table)); @@ -3758,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev, if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { ATOM_VRAM_MODULE_V4 *vram_module = (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; + ATOM_MEMORY_TIMING_FORMAT *format; for (i = 0; i < module_index; i++) { if (le16_to_cpu(vram_module->usModuleSize) == 0) @@ -3768,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev, mclk_range_table->num_entries = (u8) ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / mem_timing_size); - p = (u8 *)&vram_module->asMemTiming[0]; + format = &vram_module->asMemTiming[0]; for (i = 0; i < mclk_range_table->num_entries; i++) { - ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p; mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); - p += mem_timing_size; + format = (ATOM_MEMORY_TIMING_FORMAT *) + ((u8 *)format + mem_timing_size); } } else return -EINVAL; -- cgit v1.2.3 From aa842d736e29439d6f1a1478cd7c780d972f7cc5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 18:47:07 -0400 Subject: drm/radeon: gcc fixes for rv6xx dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/rv6xx_dpm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index bdd888b4db2b..ab1f2016f21e 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1918,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) (power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); if (power_info->pplib.ucStateEntrySize - 1) { + u8 *idx; ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL); if (ps == NULL) { kfree(rdev->pm.dpm.ps); @@ -1926,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.ps[i].ps_priv = ps; rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info); + idx = (u8 *)&power_state->v1.ucClockStateIndices[0]; for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { clock_info = (union pplib_clock_info *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + - (power_state->v1.ucClockStateIndices[j] * - power_info->pplib.ucClockInfoSize)); + (idx[j] * power_info->pplib.ucClockInfoSize)); rv6xx_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], j, clock_info); -- cgit v1.2.3 From bdcc031bc7c2d0f1986237c8cea6871cfebf0853 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 18:51:08 -0400 Subject: drm/radeonn: gcc fixes for rv7xx/eg/btc dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/rv770_dpm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 44c1e782a696..8cbb85dae5aa 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) (power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); if (power_info->pplib.ucStateEntrySize - 1) { + u8 *idx; ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL); if (ps == NULL) { kfree(rdev->pm.dpm.ps); @@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, power_info->pplib.ucNonClockSize); + idx = (u8 *)&power_state->v1.ucClockStateIndices[0]; for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { clock_info = (union pplib_clock_info *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + - (power_state->v1.ucClockStateIndices[j] * - power_info->pplib.ucClockInfoSize)); + (idx[j] * power_info->pplib.ucClockInfoSize)); rv7xx_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], j, clock_info); -- cgit v1.2.3 From d5222ae7ad719c7afe957ae382d5e71ecffe0f7d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 18:59:41 -0400 Subject: drm/radeon: gcc fixes for sumo dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/sumo_dpm.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 2cefe59ef586..864761c0120e 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) @@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev) } rdev->pm.dpm.ps[i].ps_priv = ps; k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; + clock_array_index = idx[j]; if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) break; + clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); sumo_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], k, clock_info); -- cgit v1.2.3 From 5e250d20c2b6ed0c5d1f3632c266eba4a7f979ba Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 19:02:14 -0400 Subject: drm/radeon: gcc fixes for trinity dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/trinity_dpm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index a1eb5f59939f..b07b7b8f1aff 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) @@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev) } rdev->pm.dpm.ps[i].ps_priv = ps; k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; + clock_array_index = idx[j]; if (clock_array_index >= clock_info_array->ucNumEntries) continue; if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) break; clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); trinity_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], k, clock_info); -- cgit v1.2.3 From 1e05c4d918b9d08167773cdd6edb05dde0975b40 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 19:04:02 -0400 Subject: drm/radeon: gcc fixes for ni dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ni_dpm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 238078c2b319..88aa41e341af 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -4038,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev) (power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); if (power_info->pplib.ucStateEntrySize - 1) { + u8 *idx; ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL); if (ps == NULL) { kfree(rdev->pm.dpm.ps); @@ -4047,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev) ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, power_info->pplib.ucNonClockSize); + idx = (u8 *)&power_state->v1.ucClockStateIndices[0]; for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { clock_info = (union pplib_clock_info *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + - (power_state->v1.ucClockStateIndices[j] * - power_info->pplib.ucClockInfoSize)); + (idx[j] * power_info->pplib.ucClockInfoSize)); ni_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], j, clock_info); -- cgit v1.2.3 From 53f3b25287d8eed5a274d85fe7192c5812045fa3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 19:06:54 -0400 Subject: drm/radeon: gcc fixes for si dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/si_dpm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 75a435f14380..51966f459094 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -6277,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) @@ -6293,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev) non_clock_info, non_clock_info_array->ucEntrySize); k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; + clock_array_index = idx[j]; if (clock_array_index >= clock_info_array->ucNumEntries) continue; if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) break; clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); si_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], k, clock_info); -- cgit v1.2.3 From b309ed98672705729bce271efb60f530290bbffd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 19:08:22 -0400 Subject: drm/radeon: gcc fixes for ci dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 7a6068968b70..dd2a07c44c3a 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4931,6 +4931,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) @@ -4947,14 +4948,16 @@ static int ci_parse_power_table(struct radeon_device *rdev) non_clock_info, non_clock_info_array->ucEntrySize); k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; + clock_array_index = idx[j]; if (clock_array_index >= clock_info_array->ucNumEntries) continue; if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) break; clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); ci_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], k, clock_info); -- cgit v1.2.3 From 9af37a7d4e195119ecfd570d02d17d4d159da912 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Aug 2013 19:09:54 -0400 Subject: drm/radeon: gcc fixes for kb/kv dpm Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/kv_dpm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 15a6f67813d7..ecd60809db4e 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2466,6 +2466,7 @@ static int kv_parse_power_table(struct radeon_device *rdev) rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) @@ -2479,14 +2480,16 @@ static int kv_parse_power_table(struct radeon_device *rdev) } rdev->pm.dpm.ps[i].ps_priv = ps; k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; + clock_array_index = idx[j]; if (clock_array_index >= clock_info_array->ucNumEntries) continue; if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) break; clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); kv_parse_pplib_clock_info(rdev, &rdev->pm.dpm.ps[i], k, clock_info); -- cgit v1.2.3 From 5b7d245009e734588e553092f5c0b0bd788b3a55 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 23 Aug 2013 15:28:42 -0400 Subject: drm/radeon: gcc fixes for extended dpm tables Newer versions of gcc seem to wander off into the weeds when dealing with variable sizes arrays in structs. Rather than indexing the arrays, use pointer arithmetic. 
See bugs: https://bugs.freedesktop.org/show_bug.cgi?id=66932 https://bugs.freedesktop.org/show_bug.cgi?id=66972 https://bugs.freedesktop.org/show_bug.cgi?id=66945 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_dpm.c | 74 ++++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 26a787836032..fa0de46fcc0d 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -799,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen u32 size = atom_table->ucNumEntries * sizeof(struct radeon_clock_voltage_dependency_entry); int i; + ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; radeon_table->entries = kzalloc(size, GFP_KERNEL); if (!radeon_table->entries) return -ENOMEM; + entry = &atom_table->entries[0]; for (i = 0; i < atom_table->ucNumEntries; i++) { - radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) | - (atom_table->entries[i].ucClockHigh << 16); - radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage); + radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | + (entry->ucClockHigh << 16); + radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); } radeon_table->count = atom_table->ucNumEntries; @@ -931,6 +935,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (ATOM_PPLIB_PhaseSheddingLimits_Table *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); + ATOM_PPLIB_PhaseSheddingLimits_Record *entry; rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = kzalloc(psl->ucNumEntries * @@ -941,15 +946,16 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) return -ENOMEM; } + entry = &psl->entries[0]; for (i = 0; i < psl->ucNumEntries; i++) { rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = - le16_to_cpu(psl->entries[i].usSclkLow) | - (psl->entries[i].ucSclkHigh << 16); + le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = - le16_to_cpu(psl->entries[i].usMclkLow) | - (psl->entries[i].ucMclkHigh << 16); + le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = - le16_to_cpu(psl->entries[i].usVoltage); + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); } rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count = psl->ucNumEntries; @@ -976,26 +982,30 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (ATOM_PPLIB_CAC_Leakage_Table *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); + ATOM_PPLIB_CAC_Leakage_Record *entry; u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { r600_free_extended_power_table(rdev); return -ENOMEM; } + entry = &cac_table->entries[0]; for (i = 0; i < cac_table->ucNumEntries; i++) { if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = 
- le16_to_cpu(cac_table->entries[i].usVddc1); + le16_to_cpu(entry->usVddc1); rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = - le16_to_cpu(cac_table->entries[i].usVddc2); + le16_to_cpu(entry->usVddc2); rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = - le16_to_cpu(cac_table->entries[i].usVddc3); + le16_to_cpu(entry->usVddc3); } else { rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = - le16_to_cpu(cac_table->entries[i].usVddc); + le16_to_cpu(entry->usVddc); rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = - le32_to_cpu(cac_table->entries[i].ulLeakageValue); + le32_to_cpu(entry->ulLeakageValue); } + entry = (ATOM_PPLIB_CAC_Leakage_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); } rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; } @@ -1017,6 +1027,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 1 + array->ucNumEntries * sizeof(VCEClockInfo)); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; u32 size = limits->numEntries * sizeof(struct radeon_vce_clock_voltage_dependency_entry); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = @@ -1027,15 +1038,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) } rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = limits->numEntries; + entry = &limits->entries[0]; for (i = 0; i < limits->numEntries; i++) { - VCEClockInfo *vce_clk = - &array->entries[limits->entries[i].ucVCEClockInfoIndex]; + VCEClockInfo *vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(limits->entries[i].usVoltage); + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); } } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && @@ -1048,6 +1063,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; u32 size = limits->numEntries * sizeof(struct radeon_uvd_clock_voltage_dependency_entry); rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = @@ -1058,15 +1074,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) } rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = limits->numEntries; + entry = &limits->entries[0]; for (i = 0; i < limits->numEntries; i++) { - UVDClockInfo *uvd_clk = - &array->entries[limits->entries[i].ucUVDClockInfoIndex]; + UVDClockInfo *uvd_clk = (UVDClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = le16_to_cpu(limits->entries[i].usVoltage); + entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); } } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && @@ -1075,6 +1095,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); + ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; u32 size = limits->numEntries * sizeof(struct radeon_clock_voltage_dependency_entry); rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = @@ -1085,12 +1106,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) } rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = limits->numEntries; + entry = &limits->entries[0]; for (i = 0; i < limits->numEntries; i++) { rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(limits->entries[i].usSAMClockLow) | - (limits->entries[i].ucSAMClockHigh << 16); + le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(limits->entries[i].usVoltage); + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); } } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && @@ -1130,6 +1153,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usACPTableOffset) + 1); + ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; u32 size = limits->numEntries * sizeof(struct radeon_clock_voltage_dependency_entry); rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = @@ -1140,12 +1164,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) } rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = limits->numEntries; + entry = &limits->entries[0]; for (i = 0; i < limits->numEntries; i++) { rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(limits->entries[i].usACPClockLow) | - (limits->entries[i].ucACPClockHigh << 16); + le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(limits->entries[i].usVoltage); + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); } } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && -- cgit v1.2.3 From 1ff60ddb84bb9ff6fa182710c4e08b66badf918c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Aug 2013 16:18:35 -0400 Subject: drm/radeon/dpm: make sure dc performance level limits are valid (BTC-SI) (v2) Check to make sure the dc limits are valid before using them. Some systems may not have a dc limits table. In that case just use the ac limits. This fixes hangs on systems when the power state is changed when on battery (dc) due to invalid performance state parameters. 
Should fix: https://bugs.freedesktop.org/show_bug.cgi?id=68708 v2: fix up limits in dpm_init() Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/btc_dpm.c | 6 ++++++ drivers/gpu/drm/radeon/ni_dpm.c | 6 ++++++ drivers/gpu/drm/radeon/si_dpm.c | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 9953e1fbc46d..084e69414fd1 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2699,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev) else rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000; + /* make sure dc limits are valid */ + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + return 0; } diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 88aa41e341af..f7b625c9e0e9 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -4272,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev) ni_pi->use_power_boost_limit = true; + /* make sure dc limits are valid */ + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + return 0; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 51966f459094..5be9b4e72350 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -6449,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev) si_initialize_powertune_defaults(rdev); + /* make sure dc limits are valid */ + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + return 0; } -- cgit v1.2.3 From 679fe80fbe964ea7f9f71781c2ca65b630949da3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Aug 2013 16:24:33 -0400 Subject: drm/radeon/dpm: make sure dc performance level limits are valid (CI) Check to make sure the dc limits are valid before using them. Some systems may not have a dc limits table. In that case just use the ac limits. This fixes hangs on systems when the power state is changed when on battery (dc) due to invalid performance state parameters. Should fix: https://bugs.freedesktop.org/show_bug.cgi?id=68708 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index dd2a07c44c3a..916630fdc796 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5179,6 +5179,12 @@ int ci_dpm_init(struct radeon_device *rdev) pi->uvd_power_gated = false; + /* make sure dc limits are valid */ + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + return 0; } -- cgit v1.2.3 From a5b6f74e64f42ea2cfc0f04be59369471cbc8a94 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 2 Sep 2013 09:47:56 +1000 Subject: drm/tegra: fix up page flip flags. 
This was one level away from where I'd grepped. Signed-off-by: Dave Airlie --- drivers/gpu/host1x/drm/dc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c index 5360e5a57ecc..b1a05ad901c3 100644 --- a/drivers/gpu/host1x/drm/dc.c +++ b/drivers/gpu/host1x/drm/dc.c @@ -235,7 +235,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) } static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, uint32_t page_flip_flags) { struct tegra_dc *dc = to_tegra_dc(crtc); struct drm_device *drm = crtc->dev; -- cgit v1.2.3 From 2254f637dbd18f6432da526552d19a616ffbf8d6 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 2 Sep 2013 09:52:55 +1000 Subject: drm/nouveau: fix up 32-bit ioctls and device wake up. Noticed by kbuild test robot. Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_ioc32.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ioctl.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c index 08214bcdcb12..c1a7e5a73a26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c @@ -63,7 +63,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, if (fn != NULL) ret = (*fn)(filp, cmd, arg); else - ret = drm_ioctl(filp, cmd, arg); + ret = nouveau_drm_ioctl(filp, cmd, arg); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h index ef2b2906d9e6..3b9f2e5463a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h +++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h @@ -2,5 +2,6 @@ #define __NOUVEAU_IOCTL_H__ long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg); +long nouveau_drm_ioctl(struct file *, unsigned int cmd, unsigned long arg); #endif -- cgit v1.2.3 From 3b336ec4c5460833ad7573d0b6e22793f6a389ab Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Wed, 14 Aug 2013 16:47:37 -0400 Subject: drm: Add drm_bridge This patch adds the notion of a drm_bridge. A bridge is a chained device which hangs off an encoder. The drm driver using the bridge should provide the association between encoder and bridge. Once a bridge is associated with an encoder, it will participate in mode set, and dpms (via the enable/disable hooks). 
Signed-off-by: Sean Paul Acked-by: Daniel Vetter Reviewed-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 50 ++++++++++++++++++++++ drivers/gpu/drm/drm_crtc_helper.c | 89 ++++++++++++++++++++++++++++++--------- include/drm/drm_crtc.h | 55 ++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 452591b67996..5ebc972c0b6d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -799,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev) } EXPORT_SYMBOL(drm_connector_unplug_all); +int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, + const struct drm_bridge_funcs *funcs) +{ + int ret; + + drm_modeset_lock_all(dev); + + ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE); + if (ret) + goto out; + + bridge->dev = dev; + bridge->funcs = funcs; + + list_add_tail(&bridge->head, &dev->mode_config.bridge_list); + dev->mode_config.num_bridge++; + + out: + drm_modeset_unlock_all(dev); + return ret; +} +EXPORT_SYMBOL(drm_bridge_init); + +void drm_bridge_cleanup(struct drm_bridge *bridge) +{ + struct drm_device *dev = bridge->dev; + + drm_modeset_lock_all(dev); + drm_mode_object_put(dev, &bridge->base); + list_del(&bridge->head); + dev->mode_config.num_bridge--; + drm_modeset_unlock_all(dev); +} +EXPORT_SYMBOL(drm_bridge_cleanup); + int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, @@ -1184,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; + total_objects += dev->mode_config.num_bridge; group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); if (!group->id_list) @@ -1192,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; + group->num_bridges = 0; return 0; } @@ -1201,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; + struct drm_bridge *bridge; int ret; if ((ret = drm_mode_group_init(dev, group))) @@ -1217,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; + list_for_each_entry(bridge, &dev->mode_config.bridge_list, head) + group->id_list[group->num_crtcs + group->num_encoders + + group->num_connectors + group->num_bridges++] = + bridge->base.id; + return 0; } EXPORT_SYMBOL(drm_mode_group_init_legacy_group); @@ -3902,6 +3945,7 @@ void drm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); + INIT_LIST_HEAD(&dev->mode_config.bridge_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); @@ -3938,6 +3982,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) struct drm_connector *connector, *ot; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; + struct drm_bridge *bridge, *brt; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_property_blob 
*blob, *bt; @@ -3948,6 +3993,11 @@ void drm_mode_config_cleanup(struct drm_device *dev) encoder->funcs->destroy(encoder); } + list_for_each_entry_safe(bridge, brt, + &dev->mode_config.bridge_list, head) { + bridge->funcs->destroy(bridge); + } + list_for_each_entry_safe(connector, ot, &dev->mode_config.connector_list, head) { connector->funcs->destroy(connector); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 6a647493ca7f..c722c3b5404d 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + if (encoder->bridge) + encoder->bridge->funcs->disable(encoder->bridge); + if (encoder_funcs->disable) (*encoder_funcs->disable)(encoder); else (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + + if (encoder->bridge) + encoder->bridge->funcs->post_disable(encoder->bridge); } /** @@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; + + if (encoder->bridge && encoder->bridge->funcs->mode_fixup) { + ret = encoder->bridge->funcs->mode_fixup( + encoder->bridge, mode, adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("Bridge fixup failed\n"); + goto done; + } + } + encoder_funcs = encoder->helper_private; if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { @@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; + + if (encoder->bridge) + encoder->bridge->funcs->disable(encoder->bridge); + encoder_funcs = encoder->helper_private; /* Disable the encoders as the first thing we do. */ encoder_funcs->prepare(encoder); + + if (encoder->bridge) + encoder->bridge->funcs->post_disable(encoder->bridge); } drm_crtc_prepare_encoders(dev); @@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, mode->base.id, mode->name); encoder_funcs = encoder->helper_private; encoder_funcs->mode_set(encoder, mode, adjusted_mode); + + if (encoder->bridge && encoder->bridge->funcs->mode_set) + encoder->bridge->funcs->mode_set(encoder->bridge, mode, + adjusted_mode); } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ @@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; + if (encoder->bridge) + encoder->bridge->funcs->pre_enable(encoder->bridge); + encoder_funcs = encoder->helper_private; encoder_funcs->commit(encoder); + if (encoder->bridge) + encoder->bridge->funcs->enable(encoder->bridge); } /* Store real post-adjustment hardware mode. 
*/ @@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) return dpms; } +/* Helper which handles bridge ordering around encoder dpms */ +static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_bridge *bridge = encoder->bridge; + struct drm_encoder_helper_funcs *encoder_funcs; + + if (bridge) { + if (mode == DRM_MODE_DPMS_ON) + bridge->funcs->pre_enable(bridge); + else + bridge->funcs->disable(bridge); + } + + encoder_funcs = encoder->helper_private; + if (encoder_funcs->dpms) + encoder_funcs->dpms(encoder, mode); + + if (bridge) { + if (mode == DRM_MODE_DPMS_ON) + bridge->funcs->enable(bridge); + else + bridge->funcs->post_disable(bridge); + } +} + static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) { int dpms = DRM_MODE_DPMS_OFF; @@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = connector->encoder; struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; - int old_dpms; + int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; if (mode == connector->dpms) return; @@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) old_dpms = connector->dpms; connector->dpms = mode; + if (encoder) + encoder_dpms = drm_helper_choose_encoder_dpms(encoder); + /* from off to on, do crtc then encoder */ if (mode < old_dpms) { if (crtc) { @@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } - if (encoder) { - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; - if (encoder_funcs->dpms) - (*encoder_funcs->dpms) (encoder, - drm_helper_choose_encoder_dpms(encoder)); - } + if (encoder) + drm_helper_encoder_dpms(encoder, encoder_dpms); } /* from on to off, do encoder then crtc */ if (mode > old_dpms) { - if (encoder) { - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; - if (encoder_funcs->dpms) - (*encoder_funcs->dpms) (encoder, - drm_helper_choose_encoder_dpms(encoder)); - } + if (encoder) + drm_helper_encoder_dpms(encoder, encoder_dpms); if (crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) @@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; - struct drm_encoder_helper_funcs *encoder_funcs; struct drm_crtc_helper_funcs *crtc_funcs; - int ret; + int ret, encoder_dpms; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev) if(encoder->crtc != crtc) continue; - encoder_funcs = encoder->helper_private; - if (encoder_funcs->dpms) - (*encoder_funcs->dpms) (encoder, - drm_helper_choose_encoder_dpms(encoder)); + encoder_dpms = drm_helper_choose_encoder_dpms( + encoder); + + drm_helper_encoder_dpms(encoder, encoder_dpms); } crtc_funcs = crtc->helper_private; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 78ca1512c73f..24f499569a2f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -49,6 +49,7 @@ struct drm_clip_rect; #define DRM_MODE_OBJECT_FB 0xfbfbfbfb #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee +#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd struct drm_mode_object { uint32_t id; @@ -305,6 +306,7 @@ struct drm_connector; struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; +struct 
drm_bridge; /** * drm_crtc_funcs - control CRTCs for a given device @@ -506,6 +508,7 @@ struct drm_encoder_funcs { * @possible_crtcs: bitmask of potential CRTC bindings * @possible_clones: bitmask of potential sibling encoders for cloning * @crtc: currently bound CRTC + * @bridge: bridge associated to the encoder * @funcs: control functions * @helper_private: mid-layer private data * @@ -522,6 +525,7 @@ struct drm_encoder { uint32_t possible_clones; struct drm_crtc *crtc; + struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; void *helper_private; }; @@ -681,6 +685,48 @@ struct drm_plane { struct drm_object_properties properties; }; +/** + * drm_bridge_funcs - drm_bridge control functions + * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge + * @disable: Called right before encoder prepare, disables the bridge + * @post_disable: Called right after encoder prepare, for lockstepped disable + * @mode_set: Set this mode to the bridge + * @pre_enable: Called right before encoder commit, for lockstepped commit + * @enable: Called right after encoder commit, enables the bridge + * @destroy: make object go away + */ +struct drm_bridge_funcs { + bool (*mode_fixup)(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + void (*disable)(struct drm_bridge *bridge); + void (*post_disable)(struct drm_bridge *bridge); + void (*mode_set)(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + void (*pre_enable)(struct drm_bridge *bridge); + void (*enable)(struct drm_bridge *bridge); + void (*destroy)(struct drm_bridge *bridge); +}; + +/** + * drm_bridge - central DRM bridge control structure + * @dev: DRM device this bridge belongs to + * @head: list management + * @base: base mode object + * @funcs: control functions + * @driver_private: pointer to the bridge driver's internal context + */ +struct drm_bridge { + struct drm_device *dev; + struct list_head head; + + struct drm_mode_object base; + + const struct drm_bridge_funcs *funcs; + void *driver_private; +}; + /** * drm_mode_set - new values for a CRTC config change * @head: list management @@ -741,6 +787,7 @@ struct drm_mode_group { uint32_t num_crtcs; uint32_t num_encoders; uint32_t num_connectors; + uint32_t num_bridges; /* list of object IDs for this group */ uint32_t *id_list; @@ -755,6 +802,8 @@ struct drm_mode_group { * @fb_list: list of framebuffers available * @num_connector: number of connectors on this device * @connector_list: list of connector objects + * @num_bridge: number of bridges on this device + * @bridge_list: list of bridge objects * @num_encoder: number of encoders on this device * @encoder_list: list of encoder objects * @num_crtc: number of CRTCs on this device @@ -792,6 +841,8 @@ struct drm_mode_config { int num_connector; struct list_head connector_list; + int num_bridge; + struct list_head bridge_list; int num_encoder; struct list_head encoder_list; int num_plane; @@ -881,6 +932,10 @@ extern void drm_connector_cleanup(struct drm_connector *connector); /* helper to unplug all connectors from sysfs for device */ extern void drm_connector_unplug_all(struct drm_device *dev); +extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, + const struct drm_bridge_funcs *funcs); +extern void drm_bridge_cleanup(struct drm_bridge *bridge); + extern int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, -- cgit v1.2.3 
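
For readers following the new interface above, here is a rough, hypothetical sketch of what a driver-side bridge built on it might look like. It is not part of the patch series: the foo_bridge structure, foo_hw_power_up()/foo_hw_power_down() and foo_bridge_attach() are invented for illustration, and only drm_bridge_init(), drm_bridge_cleanup(), struct drm_bridge_funcs and the encoder->bridge pointer come from the patch itself. Note that the CRTC helpers call pre_enable, enable, disable and post_disable without NULL checks, so even a trivial bridge has to supply all four; mode_fixup and mode_set are optional.

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

struct foo_bridge {
	struct drm_bridge base;
	void __iomem *mmio;		/* hypothetical register window of the external chip */
};
#define to_foo_bridge(x) container_of(x, struct foo_bridge, base)

/* stand-ins for the real register programming of the external chip */
static void foo_hw_power_up(void __iomem *mmio)   { /* enable the transmitter */ }
static void foo_hw_power_down(void __iomem *mmio) { /* disable the transmitter */ }

static void foo_bridge_pre_enable(struct drm_bridge *bridge)
{
	/* runs right before the encoder's commit/dpms-on */
	foo_hw_power_up(to_foo_bridge(bridge)->mmio);
}

static void foo_bridge_enable(struct drm_bridge *bridge)
{
	/* runs right after the encoder's commit/dpms-on; nothing to do here */
}

static void foo_bridge_disable(struct drm_bridge *bridge)
{
	/* runs right before the encoder's prepare/dpms-off; nothing to do here */
}

static void foo_bridge_post_disable(struct drm_bridge *bridge)
{
	/* runs right after the encoder's prepare/dpms-off */
	foo_hw_power_down(to_foo_bridge(bridge)->mmio);
}

static void foo_bridge_destroy(struct drm_bridge *bridge)
{
	drm_bridge_cleanup(bridge);
	kfree(to_foo_bridge(bridge));
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.pre_enable	= foo_bridge_pre_enable,
	.enable		= foo_bridge_enable,
	.disable	= foo_bridge_disable,
	.post_disable	= foo_bridge_post_disable,
	.destroy	= foo_bridge_destroy,
};

/* called from the driver's output set-up code, after the encoder exists */
static int foo_bridge_attach(struct drm_device *dev, struct drm_encoder *encoder)
{
	struct foo_bridge *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int ret;

	if (!foo)
		return -ENOMEM;

	ret = drm_bridge_init(dev, &foo->base, &foo_bridge_funcs);
	if (ret) {
		kfree(foo);
		return ret;
	}

	/* the driver provides the encoder/bridge association */
	encoder->bridge = &foo->base;
	return 0;
}

The drm/msm conversion that follows is a real instance of this pattern: its hdmi_bridge embeds a struct drm_bridge, registers it with drm_bridge_init(), and hooks it up via encoder->bridge.
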
From a3376e3ec81c5dd0622cbc187db76d2824d31c1c Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 30 Aug 2013 13:02:15 -0400 Subject: drm/msm: convert to drm_bridge Drop the msm_connector base class, and special calls to base class methods from the encoder, and use instead drm_bridge. This allows for a cleaner division between the hdmi (and in future dsi) blocks, from the mdp block. Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/msm/Makefile | 2 +- drivers/gpu/drm/msm/hdmi/hdmi.c | 49 +++++++- drivers/gpu/drm/msm/hdmi/hdmi.h | 31 +++++- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 167 ++++++++++++++++++++++++++++ drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 156 ++++++-------------------- drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c | 12 -- drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 9 +- drivers/gpu/drm/msm/msm_connector.c | 34 ------ drivers/gpu/drm/msm/msm_connector.h | 68 ----------- drivers/gpu/drm/msm/msm_drv.h | 6 +- 10 files changed, 274 insertions(+), 260 deletions(-) create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_bridge.c delete mode 100644 drivers/gpu/drm/msm/msm_connector.c delete mode 100644 drivers/gpu/drm/msm/msm_connector.h diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 439dfb5b417b..e17914889e54 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -7,6 +7,7 @@ msm-y := \ adreno/adreno_gpu.o \ adreno/a3xx_gpu.o \ hdmi/hdmi.o \ + hdmi/hdmi_bridge.o \ hdmi/hdmi_connector.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy_8960.o \ @@ -17,7 +18,6 @@ msm-y := \ mdp4/mdp4_irq.o \ mdp4/mdp4_kms.o \ mdp4/mdp4_plane.o \ - msm_connector.o \ msm_drv.o \ msm_fb.o \ msm_gem.o \ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 12ecfb928f75..50d11df35b21 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -56,8 +56,9 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id) return IRQ_HANDLED; } -void hdmi_destroy(struct hdmi *hdmi) +void hdmi_destroy(struct kref *kref) { + struct hdmi *hdmi = container_of(kref, struct hdmi, refcount); struct hdmi_phy *phy = hdmi->phy; if (phy) @@ -70,9 +71,10 @@ void hdmi_destroy(struct hdmi *hdmi) } /* initialize connector */ -int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, - struct drm_connector *connector) +int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) { + struct hdmi *hdmi = NULL; + struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = hdmi_pdev; struct hdmi_platform_config *config; int ret; @@ -85,11 +87,19 @@ int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, config = pdev->dev.platform_data; + hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) { + ret = -ENOMEM; + goto fail; + } + + kref_init(&hdmi->refcount); + get_device(&pdev->dev); hdmi->dev = dev; hdmi->pdev = pdev; - hdmi->connector = connector; + hdmi->encoder = encoder; /* not sure about which phy maps to which msm.. 
probably I miss some */ if (config->phy_init) @@ -152,6 +162,22 @@ int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, goto fail; } + hdmi->bridge = hdmi_bridge_init(hdmi); + if (IS_ERR(hdmi->bridge)) { + ret = PTR_ERR(hdmi->bridge); + dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret); + hdmi->bridge = NULL; + goto fail; + } + + hdmi->connector = hdmi_connector_init(hdmi); + if (IS_ERR(hdmi->connector)) { + ret = PTR_ERR(hdmi->connector); + dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret); + hdmi->connector = NULL; + goto fail; + } + hdmi->irq = platform_get_irq(pdev, 0); if (hdmi->irq < 0) { ret = hdmi->irq; @@ -168,11 +194,22 @@ int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, goto fail; } + encoder->bridge = hdmi->bridge; + + priv->bridges[priv->num_bridges++] = hdmi->bridge; + priv->connectors[priv->num_connectors++] = hdmi->connector; + return 0; fail: - if (hdmi) - hdmi_destroy(hdmi); + if (hdmi) { + /* bridge/connector are normally destroyed by drm: */ + if (hdmi->bridge) + hdmi->bridge->funcs->destroy(hdmi->bridge); + if (hdmi->connector) + hdmi->connector->funcs->destroy(hdmi->connector); + hdmi_destroy(&hdmi->refcount); + } return ret; } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 34703fea22ca..2c2ec566394c 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -30,6 +30,8 @@ struct hdmi_phy; struct hdmi { + struct kref refcount; + struct drm_device *dev; struct platform_device *pdev; @@ -45,6 +47,10 @@ struct hdmi { struct hdmi_phy *phy; struct i2c_adapter *i2c; struct drm_connector *connector; + struct drm_bridge *bridge; + + /* the encoder we are hooked to (outside of hdmi block) */ + struct drm_encoder *encoder; bool hdmi_mode; /* are we in hdmi mode? 
*/ @@ -58,9 +64,7 @@ struct hdmi_platform_config { }; void hdmi_set_mode(struct hdmi *hdmi, bool power_on); -void hdmi_destroy(struct hdmi *hdmi); -int hdmi_init(struct hdmi *hdmi, struct drm_device *dev, - struct drm_connector *connector); +void hdmi_destroy(struct kref *kref); static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) { @@ -72,6 +76,17 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) return msm_readl(hdmi->mmio + reg); } +static inline struct hdmi * hdmi_reference(struct hdmi *hdmi) +{ + kref_get(&hdmi->refcount); + return hdmi; +} + +static inline void hdmi_unreference(struct hdmi *hdmi) +{ + kref_put(&hdmi->refcount, hdmi_destroy); +} + /* * The phy appears to be different, for example between 8960 and 8x60, * so split the phy related functions out and load the correct one at @@ -89,17 +104,21 @@ struct hdmi_phy { const struct hdmi_phy_funcs *funcs; }; -/* - * phy can be different on different generations: - */ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); +/* + * hdmi bridge: + */ + +struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi); + /* * hdmi connector: */ void hdmi_connector_irq(struct drm_connector *connector); +struct drm_connector *hdmi_connector_init(struct hdmi *hdmi); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c new file mode 100644 index 000000000000..5a8ee3473cf5 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "hdmi.h" + +struct hdmi_bridge { + struct drm_bridge base; + + struct hdmi *hdmi; + + unsigned long int pixclock; +}; +#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) + +static void hdmi_bridge_destroy(struct drm_bridge *bridge) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + hdmi_unreference(hdmi_bridge->hdmi); + drm_bridge_cleanup(bridge); + kfree(hdmi_bridge); +} + +static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + struct hdmi_phy *phy = hdmi->phy; + + DBG("power up"); + phy->funcs->powerup(phy, hdmi_bridge->pixclock); + hdmi_set_mode(hdmi, true); +} + +static void hdmi_bridge_enable(struct drm_bridge *bridge) +{ +} + +static void hdmi_bridge_disable(struct drm_bridge *bridge) +{ +} + +static void hdmi_bridge_post_disable(struct drm_bridge *bridge) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + struct hdmi_phy *phy = hdmi->phy; + + DBG("power down"); + hdmi_set_mode(hdmi, false); + phy->funcs->powerdown(phy); +} + +static void hdmi_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + int hstart, hend, vstart, vend; + uint32_t frame_ctrl; + + mode = adjusted_mode; + + hdmi_bridge->pixclock = mode->clock * 1000; + + hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; + + hstart = mode->htotal - mode->hsync_start; + hend = mode->htotal - mode->hsync_start + mode->hdisplay; + + vstart = mode->vtotal - mode->vsync_start - 1; + vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1; + + DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d", + mode->htotal, mode->vtotal, hstart, hend, vstart, vend); + + hdmi_write(hdmi, REG_HDMI_TOTAL, + HDMI_TOTAL_H_TOTAL(mode->htotal - 1) | + HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); + + hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC, + HDMI_ACTIVE_HSYNC_START(hstart) | + HDMI_ACTIVE_HSYNC_END(hend)); + hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC, + HDMI_ACTIVE_VSYNC_START(vstart) | + HDMI_ACTIVE_VSYNC_END(vend)); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, + HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal)); + hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, + HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) | + HDMI_VSYNC_ACTIVE_F2_END(vend + 1)); + } else { + hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, + HDMI_VSYNC_TOTAL_F2_V_TOTAL(0)); + hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, + HDMI_VSYNC_ACTIVE_F2_START(0) | + HDMI_VSYNC_ACTIVE_F2_END(0)); + } + + frame_ctrl = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN; + DBG("frame_ctrl=%08x", frame_ctrl); + hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); + + // TODO until we have audio, this might be safest: + if (hdmi->hdmi_mode) + hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE); +} + +static const struct drm_bridge_funcs hdmi_bridge_funcs = { + .pre_enable = hdmi_bridge_pre_enable, + .enable = hdmi_bridge_enable, + .disable = hdmi_bridge_disable, + .post_disable = hdmi_bridge_post_disable, + .mode_set = hdmi_bridge_mode_set, + .destroy = hdmi_bridge_destroy, +}; + + +/* 
initialize bridge */ +struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) +{ + struct drm_bridge *bridge = NULL; + struct hdmi_bridge *hdmi_bridge; + int ret; + + hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL); + if (!hdmi_bridge) { + ret = -ENOMEM; + goto fail; + } + + hdmi_bridge->hdmi = hdmi_reference(hdmi); + + bridge = &hdmi_bridge->base; + + drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs); + + return bridge; + +fail: + if (bridge) + hdmi_bridge_destroy(bridge); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 7d63f5ffa7ba..823eee521a31 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -17,14 +17,11 @@ #include -#include "msm_connector.h" #include "hdmi.h" struct hdmi_connector { - struct msm_connector base; - struct hdmi hdmi; - unsigned long int pixclock; - bool enabled; + struct drm_connector base; + struct hdmi *hdmi; }; #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) @@ -90,8 +87,8 @@ error1: static int hpd_enable(struct hdmi_connector *hdmi_connector) { - struct hdmi *hdmi = &hdmi_connector->hdmi; - struct drm_device *dev = hdmi_connector->base.base.dev; + struct hdmi *hdmi = hdmi_connector->hdmi; + struct drm_device *dev = hdmi_connector->base.dev; struct hdmi_phy *phy = hdmi->phy; uint32_t hpd_ctrl; int ret; @@ -158,8 +155,8 @@ fail: static int hdp_disable(struct hdmi_connector *hdmi_connector) { - struct hdmi *hdmi = &hdmi_connector->hdmi; - struct drm_device *dev = hdmi_connector->base.base.dev; + struct hdmi *hdmi = hdmi_connector->hdmi; + struct drm_device *dev = hdmi_connector->base.dev; int ret = 0; /* Disable HPD interrupt */ @@ -194,9 +191,8 @@ fail: void hdmi_connector_irq(struct drm_connector *connector) { - struct msm_connector *msm_connector = to_msm_connector(connector); - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); - struct hdmi *hdmi = &hdmi_connector->hdmi; + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); + struct hdmi *hdmi = hdmi_connector->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; /* Process HPD: */ @@ -226,9 +222,8 @@ void hdmi_connector_irq(struct drm_connector *connector) static enum drm_connector_status hdmi_connector_detect( struct drm_connector *connector, bool force) { - struct msm_connector *msm_connector = to_msm_connector(connector); - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); - struct hdmi *hdmi = &hdmi_connector->hdmi; + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); + struct hdmi *hdmi = hdmi_connector->hdmi; uint32_t hpd_int_status; int retry = 20; @@ -249,24 +244,22 @@ static enum drm_connector_status hdmi_connector_detect( static void hdmi_connector_destroy(struct drm_connector *connector) { - struct msm_connector *msm_connector = to_msm_connector(connector); - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); hdp_disable(hdmi_connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - hdmi_destroy(&hdmi_connector->hdmi); + hdmi_unreference(hdmi_connector->hdmi); kfree(hdmi_connector); } static int hdmi_connector_get_modes(struct drm_connector *connector) { - struct msm_connector *msm_connector = to_msm_connector(connector); - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); - struct hdmi *hdmi = 
&hdmi_connector->hdmi; + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); + struct hdmi *hdmi = hdmi_connector->hdmi; struct edid *edid; uint32_t hdmi_ctrl; int ret = 0; @@ -291,14 +284,14 @@ static int hdmi_connector_get_modes(struct drm_connector *connector) static int hdmi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct msm_connector *msm_connector = to_msm_connector(connector); + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct msm_drm_private *priv = connector->dev->dev_private; struct msm_kms *kms = priv->kms; long actual, requested; requested = 1000 * mode->clock; actual = kms->funcs->round_pixclk(kms, - requested, msm_connector->encoder); + requested, hdmi_connector->hdmi->encoder); DBG("requested=%ld, actual=%ld", requested, actual); @@ -308,6 +301,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, return 0; } +static struct drm_encoder * +hdmi_connector_best_encoder(struct drm_connector *connector) +{ + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); + return hdmi_connector->hdmi->encoder; +} + static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = hdmi_connector_detect, @@ -318,101 +318,11 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_connector_get_modes, .mode_valid = hdmi_connector_mode_valid, - .best_encoder = msm_connector_attached_encoder, -}; - -static void hdmi_connector_dpms(struct msm_connector *msm_connector, int mode) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); - struct hdmi *hdmi = &hdmi_connector->hdmi; - struct hdmi_phy *phy = hdmi->phy; - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("mode=%d", mode); - - if (enabled == hdmi_connector->enabled) - return; - - if (enabled) { - phy->funcs->powerup(phy, hdmi_connector->pixclock); - hdmi_set_mode(hdmi, true); - } else { - hdmi_set_mode(hdmi, false); - phy->funcs->powerdown(phy); - } - - hdmi_connector->enabled = enabled; -} - -static void hdmi_connector_mode_set(struct msm_connector *msm_connector, - struct drm_display_mode *mode) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector); - struct hdmi *hdmi = &hdmi_connector->hdmi; - int hstart, hend, vstart, vend; - uint32_t frame_ctrl; - - hdmi_connector->pixclock = mode->clock * 1000; - - hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; - - hstart = mode->htotal - mode->hsync_start; - hend = mode->htotal - mode->hsync_start + mode->hdisplay; - - vstart = mode->vtotal - mode->vsync_start - 1; - vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1; - - DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d", - mode->htotal, mode->vtotal, hstart, hend, vstart, vend); - - hdmi_write(hdmi, REG_HDMI_TOTAL, - HDMI_TOTAL_H_TOTAL(mode->htotal - 1) | - HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); - - hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC, - HDMI_ACTIVE_HSYNC_START(hstart) | - HDMI_ACTIVE_HSYNC_END(hend)); - hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC, - HDMI_ACTIVE_VSYNC_START(vstart) | - HDMI_ACTIVE_VSYNC_END(vend)); - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, - HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal)); - hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, - HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) | - HDMI_VSYNC_ACTIVE_F2_END(vend + 1)); - } else { - 
hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, - HDMI_VSYNC_TOTAL_F2_V_TOTAL(0)); - hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, - HDMI_VSYNC_ACTIVE_F2_START(0) | - HDMI_VSYNC_ACTIVE_F2_END(0)); - } - - frame_ctrl = 0; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN; - DBG("frame_ctrl=%08x", frame_ctrl); - hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); - - // TODO until we have audio, this might be safest: - if (hdmi->hdmi_mode) - hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE); -} - -static const struct msm_connector_funcs msm_connector_funcs = { - .dpms = hdmi_connector_dpms, - .mode_set = hdmi_connector_mode_set, + .best_encoder = hdmi_connector_best_encoder, }; /* initialize connector */ -struct drm_connector *hdmi_connector_init(struct drm_device *dev, - struct drm_encoder *encoder) +struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) { struct drm_connector *connector = NULL; struct hdmi_connector *hdmi_connector; @@ -424,11 +334,11 @@ struct drm_connector *hdmi_connector_init(struct drm_device *dev, goto fail; } - connector = &hdmi_connector->base.base; + hdmi_connector->hdmi = hdmi_reference(hdmi); + + connector = &hdmi_connector->base; - msm_connector_init(&hdmi_connector->base, - &msm_connector_funcs, encoder); - drm_connector_init(dev, connector, &hdmi_connector_funcs, + drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); @@ -439,17 +349,13 @@ struct drm_connector *hdmi_connector_init(struct drm_device *dev, drm_sysfs_connector_add(connector); - ret = hdmi_init(&hdmi_connector->hdmi, dev, connector); - if (ret) - goto fail; - ret = hpd_enable(hdmi_connector); if (ret) { - dev_err(dev->dev, "failed to enable HPD: %d\n", ret); + dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret); goto fail; } - drm_mode_connector_attach_encoder(connector, encoder); + drm_mode_connector_attach_encoder(connector, hdmi->encoder); return connector; diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c index 06d49e309d34..5e0dcae70ab5 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c @@ -18,7 +18,6 @@ #include #include "mdp4_kms.h" -#include "msm_connector.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" @@ -101,7 +100,6 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct msm_connector *msm_connector = get_connector(encoder); struct mdp4_kms *mdp4_kms = get_kms(encoder); bool enabled = (mode == DRM_MODE_DPMS_ON); @@ -116,9 +114,6 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) bs_set(mdp4_dtv_encoder, 1); - if (msm_connector) - msm_connector->funcs->dpms(msm_connector, mode); - DBG("setting src_clk=%lu", pc); ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); @@ -150,9 +145,6 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - if (msm_connector) - msm_connector->funcs->dpms(msm_connector, mode); - bs_set(mdp4_dtv_encoder, 0); } @@ -171,7 +163,6 @@ static void 
mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct msm_connector *msm_connector = get_connector(encoder); struct mdp4_kms *mdp4_kms = get_kms(encoder); uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; uint32_t display_v_start, display_v_end; @@ -230,9 +221,6 @@ static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, MDP4_DTV_ACTIVE_HCTL_END(0)); mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0); mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); - - if (msm_connector) - msm_connector->funcs->mode_set(msm_connector, mode); } static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c index 960cd894da78..5db5bbaedae2 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c @@ -191,7 +191,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) struct drm_plane *plane; struct drm_crtc *crtc; struct drm_encoder *encoder; - struct drm_connector *connector; int ret; /* @@ -224,13 +223,11 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ priv->encoders[priv->num_encoders++] = encoder; - connector = hdmi_connector_init(dev, encoder); - if (IS_ERR(connector)) { - dev_err(dev->dev, "failed to construct HDMI connector\n"); - ret = PTR_ERR(connector); + ret = hdmi_init(dev, encoder); + if (ret) { + dev_err(dev->dev, "failed to initialize HDMI\n"); goto fail; } - priv->connectors[priv->num_connectors++] = connector; return 0; diff --git a/drivers/gpu/drm/msm/msm_connector.c b/drivers/gpu/drm/msm/msm_connector.c deleted file mode 100644 index aeea8879e36f..000000000000 --- a/drivers/gpu/drm/msm/msm_connector.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "msm_drv.h" -#include "msm_connector.h" - -void msm_connector_init(struct msm_connector *connector, - const struct msm_connector_funcs *funcs, - struct drm_encoder *encoder) -{ - connector->funcs = funcs; - connector->encoder = encoder; -} - -struct drm_encoder *msm_connector_attached_encoder( - struct drm_connector *connector) -{ - struct msm_connector *msm_connector = to_msm_connector(connector); - return msm_connector->encoder; -} diff --git a/drivers/gpu/drm/msm/msm_connector.h b/drivers/gpu/drm/msm/msm_connector.h deleted file mode 100644 index 0b41866adc08..000000000000 --- a/drivers/gpu/drm/msm/msm_connector.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MSM_CONNECTOR_H__ -#define __MSM_CONNECTOR_H__ - -#include "msm_drv.h" - -/* - * Base class for MSM connectors. Typically a connector is a bit more - * passive. But with the split between (for example) DTV within MDP4, - * and HDMI encoder, we really need two parts to an encoder. Instead - * what we do is have the part external to the display controller block - * in the connector, which is called from the encoder to delegate the - * appropriate parts of modeset. - */ - -struct msm_connector; - -struct msm_connector_funcs { - void (*dpms)(struct msm_connector *connector, int mode); - void (*mode_set)(struct msm_connector *connector, - struct drm_display_mode *mode); -}; - -struct msm_connector { - struct drm_connector base; - struct drm_encoder *encoder; - const struct msm_connector_funcs *funcs; -}; -#define to_msm_connector(x) container_of(x, struct msm_connector, base) - -void msm_connector_init(struct msm_connector *connector, - const struct msm_connector_funcs *funcs, - struct drm_encoder *encoder); - -struct drm_encoder *msm_connector_attached_encoder( - struct drm_connector *connector); - -static inline struct msm_connector *get_connector(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - int i; - - for (i = 0; i < priv->num_connectors; i++) { - struct drm_connector *connector = priv->connectors[i]; - if (msm_connector_attached_encoder(connector) == encoder) - return to_msm_connector(connector); - } - - return NULL; -} - -#endif /* __MSM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 34c36b2911d9..80d75094bf0a 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -83,6 +83,9 @@ struct msm_drm_private { unsigned int num_encoders; struct drm_encoder *encoders[8]; + unsigned int num_bridges; + struct drm_bridge *bridges[8]; + unsigned int num_connectors; struct drm_connector *connectors[8]; }; @@ -170,8 +173,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); -struct drm_connector *hdmi_connector_init(struct drm_device *dev, - struct drm_encoder *encoder); +int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); void __init hdmi_register(void); void __exit hdmi_unregister(void); -- cgit v1.2.3 From 101b96f32956ee99bf1468afaf572b88cda9f88b Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Mon, 26 Aug 2013 15:16:49 +0200 Subject: drm: fix DRM_IOCTL_MODE_GETFB handle-leak DRM_IOCTL_MODE_GETFB is used to retrieve information about a given framebuffer ID. It is a read-only helper and was thus declassified for unprivileged access in: commit a14b1b42477c5ef089fcda88cbaae50d979eb8f9 Author: Mandeep Singh Baines Date: Fri Jan 20 12:11:16 2012 -0800 drm: remove master fd restriction on mode setting getters However, alongside width, height and stride information, DRM_IOCTL_MODE_GETFB also passes back a handle to the underlying buffer of the framebuffer. This handle allows users to mmap() it and read or write into it. Obviously, this should be restricted to DRM-Master. 
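For illustration, a minimal libdrm sketch of the call described above, showing what any process holding the card file descriptor gets back; the fb_id argument is hypothetical here (in practice it would come from drmModeGetResources()/drmModeGetCrtc()), and the point is that fb->handle is a GEM handle to the scanout buffer, which is exactly the field the fix below zeroes for non-masters:

    #include <stdint.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Query a framebuffer over an already-open /dev/dri/card0 fd. */
    static void dump_fb(int fd, uint32_t fb_id)
    {
        drmModeFBPtr fb = drmModeGetFB(fd, fb_id);
        if (!fb)
            return;
        /* The geometry is harmless; handle is the sensitive part, since
         * the GEM handle lets the caller map the scanout buffer and read
         * or overwrite it. After this patch, non-masters without
         * CAP_SYS_ADMIN read it back as 0. */
        printf("%ux%u pitch=%u bpp=%u handle=%u\n",
               fb->width, fb->height, fb->pitch, fb->bpp, fb->handle);
        drmModeFreeFB(fb);
    }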
With the current setup, *any* process with access to /dev/dri/card0 (which means any process with access to hardware-accelerated rendering) can access the current screen framebuffer and modify it ad libitum. For backwards-compatibility reasons we want to keep the DRM_IOCTL_MODE_GETFB call unprivileged. Besides, it provides quite useful information regarding screen setup. So we simply test whether the caller is the current DRM-Master and if not, we return 0 as handle, which is always invalid. A following DRM_IOCTL_GEM_CLOSE on this handle will fail with EINVAL, but we accept this. Users shouldn't test for errors during GEM_CLOSE, anyway. And it is still better as a failing MODE_GETFB call. v2: add capable(CAP_SYS_ADMIN) check for compatibility with i-g-t Cc: Signed-off-by: David Herrmann Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5ebc972c0b6d..bff2fa941f60 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2641,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev, r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; - if (fb->funcs->create_handle) - ret = fb->funcs->create_handle(fb, file_priv, &r->handle); - else + if (fb->funcs->create_handle) { + if (file_priv->is_master || capable(CAP_SYS_ADMIN)) { + ret = fb->funcs->create_handle(fb, file_priv, + &r->handle); + } else { + /* GET_FB() is an unprivileged ioctl so we must not + * return a buffer-handle to non-master processes! For + * backwards-compatibility reasons, we cannot make + * GET_FB() privileged, so just return an invalid handle + * for non-masters. */ + r->handle = 0; + ret = 0; + } + } else { ret = -ENODEV; + } drm_framebuffer_unreference(fb); -- cgit v1.2.3 From 10ba50129ab0bdbc0ee712e50913d1c8db88c5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Sun, 25 Aug 2013 18:29:01 +0200 Subject: drm/i915: Support render nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable support for drm render nodes for i915 by flagging the ioctls that are safe and just needed for rendering. 
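As a concrete picture of what this enables, a hedged userspace sketch: render nodes appear as separate /dev/dri/renderD* minors that require neither DRM master status nor the auth-magic handshake, and ioctls not flagged DRM_RENDER_ALLOW are rejected on them. The device path used below is the customary first render minor and is an assumption of the sketch, not something this patch guarantees:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>

    int main(void)
    {
        /* No master, no drmAuthMagic(): just open the render node. */
        int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
        if (fd < 0)
            return 1;

        drmVersionPtr v = drmGetVersion(fd);
        if (v) {
            /* A rendering-only client would go on to use the ioctls
             * flagged DRM_RENDER_ALLOW in the table below: GEM
             * create/mmap/pwrite, execbuffer2, wait, context
             * create/destroy, ... */
            printf("driver: %s\n", v->name);
            drmFreeVersion(v);
        }
        close(fd);
        return 0;
    }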
v2: mark reg_read, set_caching and get_caching (ickle, danvet) Signed-off-by: Kristian Høgsberg Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_dma.c | 42 ++++++++++++++++++++--------------------- drivers/gpu/drm/i915/i915_drv.c | 3 ++- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3e4e6073d171..fdaa0915ce56 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1866,7 +1866,7 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), @@ -1879,35 +1879,35 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, 
i915_gem_get_aperture_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 735dd5625e9e..ccb28ead3501 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1022,7 +1022,8 @@ static struct drm_driver driver = { */ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | + DRIVER_RENDER, .load = i915_driver_load, .unload = i915_driver_unload, .open = i915_driver_open, -- cgit v1.2.3 From 7d7612582c15af8772c2fb2473d5fc7eebfefae2 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 25 Aug 2013 18:29:02 +0200 Subject: drm/nouveau: Support render nodes Enable support for drm render nodes for nouveau by flagging the ioctls that are safe and just needed for rendering. 
Cc: Ben Skeggs Cc: Maarten Lankhorst Signed-off-by: Martin Peres Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_drm.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 62c6118e94c0..8863644024b7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -748,18 +748,18 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) static const struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), }; long nouveau_drm_ioctl(struct file *filp, @@ -799,7 +799,7 @@ static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER, .load = nouveau_drm_load, .unload = nouveau_drm_unload, -- cgit v1.2.3 From f33bcab9e816c5bf56b74c3007790f2a256910eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sun, 25 Aug 2013 18:29:03 +0200 Subject: drm/radeon: support render nodes MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable support for drm render nodes for radeon by flagging the ioctls that are safe and just needed for rendering. Signed-off-by: Christian König Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/radeon_kms.c | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6d09258fb9f2..cb4445f55a96 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -384,7 +384,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME, + DRIVER_PRIME | DRIVER_RENDER, .dev_priv_size = 0, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 205440d9544b..61580ddc4eb2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -741,18 +741,18 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), /* KMS */ - DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, 
radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); -- cgit v1.2.3 From 03c961ba6d04786c8a7e7df118538a01f442e90b Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 3 Sep 2013 08:50:39 +0200 Subject: gpu: host1x: Check for valid host1x pointer Under rare circumstances it can happen that the host1x driver's .probe() doesn't finish properly, in which case the device's driver-specific data will not be set. Instead of crashing in such a situation, propagate the error to callers of the host1x_get_drm_data() function. Signed-off-by: Thierry Reding --- drivers/gpu/host1x/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 28e28a23d444..471630299878 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -43,7 +43,7 @@ void host1x_set_drm_data(struct device *dev, void *data) void *host1x_get_drm_data(struct device *dev) { struct host1x *host1x = dev_get_drvdata(dev); - return host1x->drm_data; + return host1x ? host1x->drm_data : NULL; } void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) -- cgit v1.2.3 From 57c6eb6f2cf89baef4188b4257b3e756f9aebef4 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 3 Sep 2013 09:02:22 +0200 Subject: gpu: host1x: Sort drivers by probe order External driver declarations are sorted by probe order for consistency. Signed-off-by: Thierry Reding --- drivers/gpu/host1x/dev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index 790ddf114e58..bed90a8131be 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -301,8 +301,8 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o) host->debug_op->show_mlocks(host, o); } -extern struct platform_driver tegra_hdmi_driver; extern struct platform_driver tegra_dc_driver; +extern struct platform_driver tegra_hdmi_driver; extern struct platform_driver tegra_gr2d_driver; #endif -- cgit v1.2.3 From 03da0e7ba9e3a1fc700f60913ff6bcb19f7eac2a Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 30 Aug 2013 15:27:16 +0200 Subject: drm/tegra: Parse device tree earlier Parsing the device tree may cause probing to be deferred. Doing this as early as possible prevents any other resources from being requested and enabled, therefore reducing the need to cleanup on deferred probe while at the same time not wasting precious CPU cycles determining if probing needs to be deferred or not. 
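The ordering rule being applied here, shown as a generic hedged sketch rather than the Tegra code itself (foo, foo_parse_dt and friends are made-up names standing in for the driver's own): let the step that can return -EPROBE_DEFER run first, before any clocks, regulators or IRQs are requested, so a deferred probe returns without having acquired anything.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo {                        /* hypothetical driver state */
        struct clk *clk;
    };

    static int foo_parse_dt(struct foo *priv, struct device_node *np)
    {
        /* Stub: a real driver would follow phandles here, and a missing
         * remote device is where -EPROBE_DEFER typically comes from. */
        return np ? 0 : -EINVAL;
    }

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo *priv;
        int err;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        /* The deferrable step goes first, while nothing else has been
         * requested, so bailing out is free. */
        err = foo_parse_dt(priv, pdev->dev.of_node);
        if (err < 0)
            return err;

        /* Only now take managed resources such as clocks. */
        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
            return PTR_ERR(priv->clk);

        platform_set_drvdata(pdev, priv);
        return 0;
    }

Even with devm-managed resources, moving the parse up avoids requesting resources that would immediately be released again on every deferred attempt.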
Signed-off-by: Thierry Reding --- drivers/gpu/host1x/drm/rgb.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/host1x/drm/rgb.c index ed4416f20260..5aa66ef7a946 100644 --- a/drivers/gpu/host1x/drm/rgb.c +++ b/drivers/gpu/host1x/drm/rgb.c @@ -147,6 +147,13 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) if (!rgb) return -ENOMEM; + rgb->output.dev = dc->dev; + rgb->output.of_node = np; + + err = tegra_output_parse_dt(&rgb->output); + if (err < 0) + return err; + rgb->clk = devm_clk_get(dc->dev, NULL); if (IS_ERR(rgb->clk)) { dev_err(dc->dev, "failed to get clock\n"); @@ -165,13 +172,6 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) return err; } - rgb->output.dev = dc->dev; - rgb->output.of_node = np; - - err = tegra_output_parse_dt(&rgb->output); - if (err < 0) - return err; - dc->rgb = &rgb->output; return 0; -- cgit v1.2.3 From 57be046e5af098ab2ff972269799ef495a7f8a2b Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Sat, 27 Jul 2013 00:27:00 -0400 Subject: drm/nouveau/core: get rid of math.h, replace log2i with order_base_2 Signed-off-by: Ilia Mirkin Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/core/ramht.c | 3 +-- drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3 +-- drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 3 +-- drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 3 +-- drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 3 +-- drivers/gpu/drm/nouveau/core/include/core/math.h | 16 ---------------- drivers/gpu/drm/nouveau/core/os.h | 3 ++- 7 files changed, 7 insertions(+), 27 deletions(-) delete mode 100644 drivers/gpu/drm/nouveau/core/include/core/math.h diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c index 86a64045dd60..f3b9bddc3875 100644 --- a/drivers/gpu/drm/nouveau/core/core/ramht.c +++ b/drivers/gpu/drm/nouveau/core/core/ramht.c @@ -22,7 +22,6 @@ #include #include -#include #include @@ -104,6 +103,6 @@ nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu, if (ret) return ret; - ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3); + ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index e9b8217d0075..7e5dff51d3c5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -278,7 +277,7 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, return ret; ioffset = args->ioffset; - ilength = log2i(args->ilength / 8); + ilength = order_base_2(args->ilength / 8); nv_wo32(base->ramfc, 0x3c, 0x403f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 7f53196cff52..433b2d8b73b2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -258,7 +257,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, nv_parent(chan)->object_detach = nv50_fifo_object_detach; ioffset = args->ioffset; - ilength = log2i(args->ilength / 8); + ilength = order_base_2(args->ilength / 8); nv_wo32(base->ramfc, 0x3c, 0x403f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c 
b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 46dfa68c47bb..ce92f289e751 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include @@ -200,7 +199,7 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, usermem = chan->base.chid * 0x1000; ioffset = args->ioffset; - ilength = log2i(args->ilength / 8); + ilength = order_base_2(args->ilength / 8); for (i = 0; i < 0x1000; i += 4) nv_wo32(priv->user.mem, usermem + i, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 09644fa9602c..8e8121abe31b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include @@ -240,7 +239,7 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent, usermem = chan->base.chid * 0x200; ioffset = args->ioffset; - ilength = log2i(args->ilength / 8); + ilength = order_base_2(args->ilength / 8); for (i = 0; i < 0x200; i += 4) nv_wo32(priv->user.mem, usermem + i, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h deleted file mode 100644 index f808131c5cd8..000000000000 --- a/drivers/gpu/drm/nouveau/core/include/core/math.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __NOUVEAU_MATH_H__ -#define __NOUVEAU_MATH_H__ - -static inline int -log2i(u64 base) -{ - u64 temp = base >> 1; - int log2; - - for (log2 = 0; temp; log2++, temp >>= 1) { - } - - return (base & (base - 1)) ? log2 + 1: log2; -} - -#endif diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h index 3bd9be2ab37f..d48683a82585 100644 --- a/drivers/gpu/drm/nouveau/core/os.h +++ b/drivers/gpu/drm/nouveau/core/os.h @@ -13,11 +13,12 @@ #include #include #include -#include #include +#include #include #include #include +#include #include -- cgit v1.2.3 From ef7d64e5c27bc2587b4a20c9ae04413ce679bd8c Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Sun, 28 Jul 2013 22:30:06 -0400 Subject: drm/nouveau/vdec: implement support for VP3 engines For NV98+, BSP/VP/PPP are all FUC-based engines. Hook them all up in the same way as NVC0, but with a couple of different values. Also make sure that the PPP engine is handled in the fifo/mc/vm. Signed-off-by: Ilia Mirkin Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c | 52 +++++++++++++++++-------- drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 2 + drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 52 +++++++++++++++---------- drivers/gpu/drm/nouveau/core/engine/vp/nv98.c | 47 +++++++++++++++------- drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 3 +- drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c | 1 + 6 files changed, 104 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c index 8bf92b0e6d82..6b089e022fd2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c @@ -19,16 +19,14 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: Ben Skeggs + * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin */ -#include -#include - +#include #include struct nv98_bsp_priv { - struct nouveau_engine base; + struct nouveau_falcon base; }; /******************************************************************************* @@ -37,30 +35,48 @@ struct nv98_bsp_priv { static struct nouveau_oclass nv98_bsp_sclass[] = { + { 0x88b1, &nouveau_object_ofuncs }, + { 0x85b1, &nouveau_object_ofuncs }, + { 0x86b1, &nouveau_object_ofuncs }, {}, }; /******************************************************************************* - * BSP context + * PBSP context ******************************************************************************/ static struct nouveau_oclass nv98_bsp_cclass = { .handle = NV_ENGCTX(BSP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, }, }; /******************************************************************************* - * BSP engine/subdev functions + * PBSP engine/subdev functions ******************************************************************************/ +static int +nv98_bsp_init(struct nouveau_object *object) +{ + struct nv98_bsp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x084010, 0x0000ffd2); + nv_wr32(priv, 0x08401c, 0x0000fff2); + return 0; +} + static int nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,7 +85,7 @@ nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_bsp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, + ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, "PBSP", "bsp", &priv); *pobject = nv_object(priv); if (ret) @@ -86,8 +102,10 @@ nv98_bsp_oclass = { .handle = NV_ENGINE(BSP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_bsp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_falcon_dtor, + .init = nv98_bsp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, }, }; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 433b2d8b73b2..91a87cd7195a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -56,6 +56,7 @@ nv84_fifo_context_attach(struct nouveau_object *parent, case NVDEV_ENGINE_SW : return 0; case NVDEV_ENGINE_GR : addr = 0x0020; break; case NVDEV_ENGINE_VP : addr = 0x0040; break; + case NVDEV_ENGINE_PPP : case NVDEV_ENGINE_MPEG : addr = 0x0060; break; case NVDEV_ENGINE_BSP : addr = 0x0080; break; case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break; @@ -91,6 +92,7 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, case NVDEV_ENGINE_SW : return 0; case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break; case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break; + case NVDEV_ENGINE_PPP : case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break; 
case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break; case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break; diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c index 5a5b2a773ed7..13bf31c40aa1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c @@ -19,21 +19,14 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: Ben Skeggs + * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin */ -#include -#include -#include - +#include #include struct nv98_ppp_priv { - struct nouveau_engine base; -}; - -struct nv98_ppp_chan { - struct nouveau_engctx base; + struct nouveau_falcon base; }; /******************************************************************************* @@ -42,6 +35,8 @@ struct nv98_ppp_chan { static struct nouveau_oclass nv98_ppp_sclass[] = { + { 0x88b3, &nouveau_object_ofuncs }, + { 0x85b3, &nouveau_object_ofuncs }, {}, }; @@ -53,12 +48,12 @@ static struct nouveau_oclass nv98_ppp_cclass = { .handle = NV_ENGCTX(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, }, }; @@ -66,6 +61,21 @@ nv98_ppp_cclass = { * PPPP engine/subdev functions ******************************************************************************/ +static int +nv98_ppp_init(struct nouveau_object *object) +{ + struct nv98_ppp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x086010, 0x0000ffd2); + nv_wr32(priv, 0x08601c, 0x0000fff2); + return 0; +} + static int nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -74,7 +84,7 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_ppp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, + ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true, "PPPP", "ppp", &priv); *pobject = nv_object(priv); if (ret) @@ -91,8 +101,10 @@ nv98_ppp_oclass = { .handle = NV_ENGINE(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_ppp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_falcon_dtor, + .init = nv98_ppp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, }, }; diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c index 8a8236bc84de..fc9ae0ff1ef5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c @@ -19,16 +19,14 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: Ben Skeggs + * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin */ -#include -#include - +#include #include struct nv98_vp_priv { - struct nouveau_engine base; + struct nouveau_falcon base; }; /******************************************************************************* @@ -37,6 +35,8 @@ struct nv98_vp_priv { static struct nouveau_oclass nv98_vp_sclass[] = { + { 0x88b2, &nouveau_object_ofuncs }, + { 0x85b2, &nouveau_object_ofuncs }, {}, }; @@ -48,12 +48,12 @@ static struct nouveau_oclass nv98_vp_cclass = { .handle = NV_ENGCTX(VP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, }, }; @@ -61,6 +61,21 @@ nv98_vp_cclass = { * PVP engine/subdev functions ******************************************************************************/ +static int +nv98_vp_init(struct nouveau_object *object) +{ + struct nv98_vp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x085010, 0x0000ffd2); + nv_wr32(priv, 0x08501c, 0x0000fff2); + return 0; +} + static int nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,7 +84,7 @@ nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_vp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, + ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, "PVP", "vp", &priv); *pobject = nv_object(priv); if (ret) @@ -86,8 +101,10 @@ nv98_vp_oclass = { .handle = NV_ENGINE(VP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_vp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_falcon_dtor, + .init = nv98_vp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, }, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index 0d57b4d3e001..06710419a59b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -35,6 +35,7 @@ nv98_mc_intr[] = { { 0x00001000, NVDEV_ENGINE_GR }, { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */ { 0x00008000, NVDEV_ENGINE_BSP }, + { 0x00020000, NVDEV_ENGINE_VP }, { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, @@ -42,7 +43,7 @@ nv98_mc_intr[] = { { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0040d101, NVDEV_SUBDEV_FB }, + { 0x0042d101, NVDEV_SUBDEV_FB }, {}, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c index 07dd1fe2d6fb..a4aa81a2173b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c @@ -174,6 +174,7 @@ nv50_vm_flush(struct nouveau_vm *vm) case NVDEV_ENGINE_GR : vme = 0x00; break; case NVDEV_ENGINE_VP : vme = 0x01; break; case NVDEV_SUBDEV_BAR : vme = 0x06; break; + case NVDEV_ENGINE_PPP : 
case NVDEV_ENGINE_MPEG : vme = 0x08; break; case NVDEV_ENGINE_BSP : vme = 0x09; break; case NVDEV_ENGINE_CRYPT: vme = 0x0a; break; -- cgit v1.2.3 From c98b81946827fe04c36bfa6bb376ffa739b0c2d0 Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Mon, 29 Jul 2013 19:05:16 -0400 Subject: drm/nouveau: remove duplicate copy of nv44_graph_class Signed-off-by: Ilia Mirkin Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 3 +++ drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 10 ++-------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h index 7da35a4e7970..ad8209377529 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h @@ -1,6 +1,9 @@ #ifndef __NV40_GRAPH_H__ #define __NV40_GRAPH_H__ +#include +#include + /* returns 1 if device is one of the nv4x using the 0x4497 object class, * helpful to determine a number of other hardware features */ diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c index 716bf41bc3c1..b10a143787a7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c @@ -22,15 +22,9 @@ * Authors: Ben Skeggs */ -#include "nv04.h" +#include -static inline int -nv44_graph_class(struct nv04_instmem_priv *priv) -{ - if ((nv_device(priv)->chipset & 0xf0) == 0x60) - return 1; - return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f))); -} +#include "nv04.h" static int nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, -- cgit v1.2.3 From b969fa52ba725adf83761d579a300b1107f84478 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Tue, 30 Jul 2013 01:01:10 +0100 Subject: drm/nv50-/disp: use the number of dac, sor, pior rather than hardcoded values The values are already stored on chipset specific basis in the ctor. Make the most of them and simplify the code further by using a temporary variable to avoid code duplication. Signed-off-by: Emil Velikov Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 34 ++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 7ffe2f309f12..c168ae3eaa97 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -628,7 +628,7 @@ nv50_disp_base_init(struct nouveau_object *object) } /* ... 
PIOR caps */ - for (i = 0; i < 3; i++) { + for (i = 0; i < priv->pior.nr; i++) { tmp = nv_rd32(priv, 0x61e000 + (i * 0x800)); nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp); } @@ -834,10 +834,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id) u8 ver, hdr, cnt, len; u16 data; u32 ctrl = 0x00000000; + u32 reg; int i; /* DAC */ - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); /* SOR */ @@ -845,19 +846,18 @@ exec_script(struct nv50_disp_priv *priv, int head, int id) if (nv_device(priv)->chipset < 0x90 || nv_device(priv)->chipset == 0x92 || nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); - i += 4; + reg = 0x610b74; } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610798 + (i * 8)); - i += 4; + reg = 0x610798; } + for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) + ctrl = nv_rd32(priv, reg + (i * 8)); + i += 4; } /* PIOR */ if (!(ctrl & (1 << head))) { - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) ctrl = nv_rd32(priv, 0x610b84 + (i * 8)); i += 8; } @@ -893,10 +893,11 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u8 ver, hdr, cnt, len; u32 ctrl = 0x00000000; u32 data, conf = ~0; + u32 reg; int i; /* DAC */ - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); /* SOR */ @@ -904,19 +905,18 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, if (nv_device(priv)->chipset < 0x90 || nv_device(priv)->chipset == 0x92 || nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); - i += 4; + reg = 0x610b70; } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610794 + (i * 8)); - i += 4; + reg = 0x610794; } + for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) + ctrl = nv_rd32(priv, reg + (i * 8)); + i += 4; } /* PIOR */ if (!(ctrl & (1 << head))) { - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) ctrl = nv_rd32(priv, 0x610b80 + (i * 8)); i += 8; } -- cgit v1.2.3 From bd9c5a2016307164c419c5e24a46921c10e620a0 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 7 Aug 2013 17:11:27 +0200 Subject: drm/nouveau: require contiguous bo for framebuffer This was already required before, but no check in the kernel was done to enforce it. 
Signed-off-by: Maarten Lankhorst Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_display.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 77ffded68837..d2712e6e5d31 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -107,6 +107,11 @@ nouveau_framebuffer_init(struct drm_device *dev, return -EINVAL; } + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { + NV_ERROR(drm, "framebuffer requires contiguous bo\n"); + return -EINVAL; + } + if (nv_device(drm->device)->chipset == 0x50) nv_fb->r_format |= (tile_flags << 8); -- cgit v1.2.3 From ffb8ea8af20d88ed8b314be62a77b992055d10c8 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Sun, 11 Aug 2013 22:48:48 -0400 Subject: drm/nouveau/therm: Set the correct pwm_mode upon resume Signed-off-by: Emil Velikov Signed-off-by: Martin Peres Tested-by: Martin Peres Tested-by: Dash Four Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index a00a5a76e2d6..3e9d941de553 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -268,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object) return ret; if (priv->suspend >= 0) - nouveau_therm_fan_mode(therm, priv->mode); + nouveau_therm_fan_mode(therm, priv->suspend); priv->sensor.program_alarms(therm); return 0; } -- cgit v1.2.3 From 4cc00ad1373674cdd1bce387bd1d4e3f5c42a99c Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 11 Aug 2013 22:48:49 -0400 Subject: drm/nouveau/fan: restore pwm value on resume when in manual/auto mode If the fan was in manual or auto mode, we should restore the fan speed that was previously set when resuming. The initial pwm value is saved when loading the module. 
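Condensed, the approach is: capture the PWM duty once when the fan code is constructed, then re-program it on resume before restoring the fan mode. A rough sketch of the two halves, reusing the therm helper names from this series (not the literal diff, which follows):

    /* at construction time: remember the duty currently programmed,
     * so there is something sensible to restore later */
    priv->fan->percent = nouveau_therm_fan_get(therm);

    /* at init time, when coming back from suspend: only manual/auto
     * modes (priv->suspend > 0) carry a speed worth restoring */
    if (priv->suspend > 0)
            nouveau_therm_fan_set(therm, true, priv->fan->percent);
    nouveau_therm_fan_mode(therm, priv->suspend);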
Signed-off-by: Martin Peres Tested-by: Martin Peres Tested-by: Dash Four Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 7 ++++++- drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index 3e9d941de553..2ada3d71312f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -267,8 +267,13 @@ _nouveau_therm_init(struct nouveau_object *object) if (ret) return ret; - if (priv->suspend >= 0) + if (priv->suspend >= 0) { + /* restore the pwm value only when on manual or auto mode */ + if (priv->suspend > 0) + nouveau_therm_fan_set(therm, true, priv->fan->percent); + nouveau_therm_fan_mode(therm, priv->suspend); + } priv->sensor.program_alarms(therm); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c index c728380d3d62..4d8450fcf0a0 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c @@ -234,6 +234,9 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm) nv_info(therm, "FAN control: %s\n", priv->fan->type); + /* read the current speed, it is useful when resuming */ + priv->fan->percent = nouveau_therm_fan_get(therm); + /* attempt to detect a tachometer connection */ ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach); if (ret) -- cgit v1.2.3 From 7fabd25393c7b5cb749d358772ccea2a570f4b49 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 11 Aug 2013 22:48:50 -0400 Subject: drm/nouveau/timer: restore the time on resume This can be useful if some parts of Nouveau try to calculate the time between two events. Without this patch, the time difference would be negative in the case where the computer is suspended/resumed between two events. This patch should fix fan speed probing when done while suspending/resuming. Solve this by saving the current time before suspending and by restoring it on resume. 
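The mechanism is simply "read the counter when suspending, write it back when resuming". A condensed sketch of the two halves (the full diff follows; upper_32_bits()/lower_32_bits() are standard kernel helpers used here for brevity, the patch open-codes the split):

    /* fini(), with suspend == true: latch the 64-bit PTIMER value */
    if (suspend)
            priv->suspend_time = nv04_timer_read(&priv->base);

    /* init(), on resume: write it back as two 32-bit halves */
    nv_wr32(priv, NV04_PTIMER_TIME_1, upper_32_bits(priv->suspend_time));
    nv_wr32(priv, NV04_PTIMER_TIME_0, lower_32_bits(priv->suspend_time));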
Signed-off-by: Martin Peres Tested-by: Martin Peres Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c index 9469b8275675..49350eaf4df5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c @@ -36,6 +36,7 @@ struct nv04_timer_priv { struct nouveau_timer base; struct list_head alarms; spinlock_t lock; + u64 suspend_time; }; static u64 @@ -146,6 +147,7 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine, priv->base.base.intr = nv04_timer_intr; priv->base.read = nv04_timer_read; priv->base.alarm = nv04_timer_alarm; + priv->suspend_time = 0; INIT_LIST_HEAD(&priv->alarms); spin_lock_init(&priv->lock); @@ -164,7 +166,7 @@ nv04_timer_init(struct nouveau_object *object) { struct nouveau_device *device = nv_device(object); struct nv04_timer_priv *priv = (void *)object; - u32 m = 1, f, n, d; + u32 m = 1, f, n, d, lo, hi; int ret; ret = nouveau_timer_init(&priv->base); @@ -221,16 +223,25 @@ nv04_timer_init(struct nouveau_object *object) d >>= 1; } + /* restore the time before suspend */ + lo = priv->suspend_time; + hi = (priv->suspend_time >> 32); + nv_debug(priv, "input frequency : %dHz\n", f); nv_debug(priv, "input multiplier: %d\n", m); nv_debug(priv, "numerator : 0x%08x\n", n); nv_debug(priv, "denominator : 0x%08x\n", d); nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n); + nv_debug(priv, "time low : 0x%08x\n", lo); + nv_debug(priv, "time high : 0x%08x\n", hi); nv_wr32(priv, NV04_PTIMER_NUMERATOR, n); nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d); nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff); nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); + nv_wr32(priv, NV04_PTIMER_TIME_1, hi); + nv_wr32(priv, NV04_PTIMER_TIME_0, lo); + return 0; } @@ -238,6 +249,8 @@ static int nv04_timer_fini(struct nouveau_object *object, bool suspend) { struct nv04_timer_priv *priv = (void *)object; + if (suspend) + priv->suspend_time = nv04_timer_read(&priv->base); nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); return nouveau_timer_fini(&priv->base, suspend); } -- cgit v1.2.3 From b925a75d6729cec8debd0c378b354d8c228b36aa Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 11 Aug 2013 22:48:51 -0400 Subject: drm/nouveau/timer: add a way to cancel alarms Since alarms don't play well with suspend, it is important every alarm user cancels his tasks before suspending. The task should be rescheduled on resume. 
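A minimal sketch of how an alarm user is expected to behave across a suspend/resume cycle with this new hook (hypothetical subdev code; the therm changes later in this series follow exactly this shape):

    /* in the subdev's fini(), before suspending: drop the pending alarm */
    if (suspend)
            nouveau_timer_alarm_cancel(priv, &priv->alarm);

    /* in the subdev's init(), on resume: reschedule the periodic work,
     * here one second from now */
    nouveau_timer_alarm(priv, 1000000000ULL, &priv->alarm);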
Signed-off-by: Martin Peres Tested-by: Martin Peres Tested-by: Dash Four Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/include/subdev/timer.h | 2 ++ drivers/gpu/drm/nouveau/core/subdev/timer/base.c | 7 +++++++ drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 20 ++++++++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h index e465d158d352..9ab70dfe5b02 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h @@ -22,6 +22,7 @@ bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data); bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data); bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data); void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *); +void nouveau_timer_alarm_cancel(void *, struct nouveau_alarm *); #define NV_WAIT_DEFAULT 2000000000ULL #define nv_wait(o,a,m,v) \ @@ -35,6 +36,7 @@ struct nouveau_timer { struct nouveau_subdev base; u64 (*read)(struct nouveau_timer *); void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *); + void (*alarm_cancel)(struct nouveau_timer *, struct nouveau_alarm *); }; static inline struct nouveau_timer * diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c index 5d417cc9949b..cf8a0e0f8ee3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c @@ -85,3 +85,10 @@ nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm) struct nouveau_timer *ptimer = nouveau_timer(obj); ptimer->alarm(ptimer, nsec, alarm); } + +void +nouveau_timer_alarm_cancel(void *obj, struct nouveau_alarm *alarm) +{ + struct nouveau_timer *ptimer = nouveau_timer(obj); + ptimer->alarm_cancel(ptimer, alarm); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c index 49350eaf4df5..57711ecb566c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c @@ -113,6 +113,25 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time, nv04_timer_alarm_trigger(ptimer); } +static void +nv04_timer_alarm_cancel(struct nouveau_timer *ptimer, + struct nouveau_alarm *alarm) +{ + struct nv04_timer_priv *priv = (void *)ptimer; + unsigned long flags; + + /* avoid deleting an entry while the alarm intr is running */ + spin_lock_irqsave(&priv->lock, flags); + + /* delete the alarm from the list */ + list_del(&alarm->head); + + /* reset the head so as list_empty returns 1 */ + INIT_LIST_HEAD(&alarm->head); + + spin_unlock_irqrestore(&priv->lock, flags); +} + static void nv04_timer_intr(struct nouveau_subdev *subdev) { @@ -147,6 +166,7 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine, priv->base.base.intr = nv04_timer_intr; priv->base.read = nv04_timer_read; priv->base.alarm = nv04_timer_alarm; + priv->base.alarm_cancel = nv04_timer_alarm_cancel; priv->suspend_time = 0; INIT_LIST_HEAD(&priv->alarms); -- cgit v1.2.3 From c4a62a766062c268d08c1aacf2e18e057e277db4 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 11 Aug 2013 22:48:52 -0400 Subject: drm/nouveau/therm: survive to suspend/resume cycles Therm uses 3 ptimer alarms. Two to drive the fan and one for polling the temperature. When suspending/resuming, alarms will never be fired. 
As we are checking if there isn't an alarm pending before rescheduling another one, we end up never checking temperature or updating the fan speed. This commit also adds debug messages to be able to spot more easily if this case happens again in the future. Sorry for the spam if you activate the debug level though. Tested-by: Dash Four v2: - fix temperature polling too Signed-off-by: Martin Peres Tested-by: Martin Peres Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 10 +++++++++- drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 17 +++++++++++++++++ drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 4 ++++ drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 21 +++++++++++++++++++++ 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index 2ada3d71312f..f1de7a9c572b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -95,12 +95,14 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode) int duty; spin_lock_irqsave(&priv->lock, flags); + nv_debug(therm, "FAN speed check\n"); if (mode < 0) mode = priv->mode; priv->mode = mode; switch (mode) { case NOUVEAU_THERM_CTRL_MANUAL: + ptimer->alarm_cancel(ptimer, &priv->alarm); duty = nouveau_therm_fan_get(therm); if (duty < 0) duty = 100; @@ -113,6 +115,7 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode) break; case NOUVEAU_THERM_CTRL_NONE: default: + ptimer->alarm_cancel(ptimer, &priv->alarm); goto done; } @@ -122,6 +125,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode) done: if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO)) ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); + else if (!list_empty(&priv->alarm.head)) + nv_debug(therm, "therm fan alarm list is not empty\n"); spin_unlock_irqrestore(&priv->lock, flags); } @@ -274,7 +279,8 @@ _nouveau_therm_init(struct nouveau_object *object) nouveau_therm_fan_mode(therm, priv->suspend); } - priv->sensor.program_alarms(therm); + nouveau_therm_sensor_init(therm); + nouveau_therm_fan_init(therm); return 0; } @@ -284,6 +290,8 @@ _nouveau_therm_fini(struct nouveau_object *object, bool suspend) struct nouveau_therm *therm = (void *)object; struct nouveau_therm_priv *priv = (void *)therm; + nouveau_therm_fan_fini(therm, suspend); + nouveau_therm_sensor_fini(therm, suspend); if (suspend) { priv->suspend = priv->mode; priv->mode = NOUVEAU_THERM_CTRL_NONE; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c index 4d8450fcf0a0..39f47b950ad1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c @@ -203,6 +203,23 @@ nouveau_therm_fan_safety_checks(struct nouveau_therm *therm) priv->fan->bios.min_duty = priv->fan->bios.max_duty; } +int +nouveau_therm_fan_init(struct nouveau_therm *therm) +{ + return 0; +} + +int +nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_timer *ptimer = nouveau_timer(therm); + + if (suspend) + ptimer->alarm_cancel(ptimer, &priv->fan->alarm); + return 0; +} + int nouveau_therm_fan_ctor(struct nouveau_therm *therm) { diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h index 15ca64e481f1..dd38529262fb 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ 
b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h @@ -113,6 +113,8 @@ void nouveau_therm_ic_ctor(struct nouveau_therm *therm); int nouveau_therm_sensor_ctor(struct nouveau_therm *therm); int nouveau_therm_fan_ctor(struct nouveau_therm *therm); +int nouveau_therm_fan_init(struct nouveau_therm *therm); +int nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend); int nouveau_therm_fan_get(struct nouveau_therm *therm); int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent); int nouveau_therm_fan_user_get(struct nouveau_therm *therm); @@ -122,6 +124,8 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm); int nouveau_therm_preinit(struct nouveau_therm *); +int nouveau_therm_sensor_init(struct nouveau_therm *therm); +int nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend); void nouveau_therm_sensor_preinit(struct nouveau_therm *); void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, enum nouveau_therm_thrs thrs, diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index dde746c78c8a..b80a33011b93 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -180,6 +180,8 @@ alarm_timer_callback(struct nouveau_alarm *alarm) spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); + nv_debug(therm, "polling the internal temperature\n"); + nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, NOUVEAU_THERM_THRS_FANBOOST); @@ -216,6 +218,25 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) alarm_timer_callback(&priv->sensor.therm_poll_alarm); } +int +nouveau_therm_sensor_init(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + priv->sensor.program_alarms(therm); + return 0; +} + +int +nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_timer *ptimer = nouveau_timer(therm); + + if (suspend) + ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm); + return 0; +} + void nouveau_therm_sensor_preinit(struct nouveau_therm *therm) { -- cgit v1.2.3 From c865534f1e5b5b4ef03f4a55cf4730f4b70dd75b Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Fri, 23 Aug 2013 13:03:14 -0400 Subject: drm/nouveau/i2c: pass the function pointers in at creation time i2c_bit_add_bus can call the pre_xfer function, which expects the func pointer to be set. Pass in func to the port creation logic so that it is set before i2c_bit_add_bus. 
See https://bugs.freedesktop.org/show_bug.cgi?id=68456 Reported-by: Hans-Peter Deifel Tested-by: Hans-Peter Deifel Signed-off-by: Ilia Mirkin Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 8 +++++--- drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c | 10 ++++------ drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 2 ++ drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c | 4 ++-- drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c | 4 ++-- drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c | 4 ++-- drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c | 8 ++++---- drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c | 4 ++-- 8 files changed, 23 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h index 888384c0bed8..7e4e2775f249 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h @@ -39,8 +39,8 @@ struct nouveau_i2c_func { int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe); }; -#define nouveau_i2c_port_create(p,e,o,i,a,d) \ - nouveau_i2c_port_create_((p), (e), (o), (i), (a), \ +#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \ + nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \ sizeof(**d), (void **)d) #define nouveau_i2c_port_destroy(p) ({ \ struct nouveau_i2c_port *port = (p); \ @@ -53,7 +53,9 @@ struct nouveau_i2c_func { int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *, struct nouveau_oclass *, u8, - const struct i2c_algorithm *, int, void **); + const struct i2c_algorithm *, + const struct nouveau_i2c_func *, + int, void **); void _nouveau_i2c_port_dtor(struct nouveau_object *); #define _nouveau_i2c_port_init nouveau_object_init #define _nouveau_i2c_port_fini nouveau_object_fini diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c index dec94e9d776a..4b195ac4da66 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c @@ -118,7 +118,8 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_aux_algo, &chan); + &nouveau_i2c_aux_algo, &anx9805_aux_func, + &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -140,8 +141,6 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent, struct i2c_algo_bit_data *algo = mast->adapter.algo_data; algo->udelay = max(algo->udelay, 40); } - - chan->base.func = &anx9805_aux_func; return 0; } @@ -234,7 +233,8 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &anx9805_i2c_algo, &port); + &anx9805_i2c_algo, &anx9805_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; @@ -256,8 +256,6 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent, struct i2c_algo_bit_data *algo = mast->adapter.algo_data; algo->udelay = max(algo->udelay, 40); } - - port->base.func = &anx9805_i2c_func; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 8ae2625415e1..2895c19bb152 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -95,6 +95,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, u8 index, const struct i2c_algorithm *algo, + const struct 
nouveau_i2c_func *func, int size, void **pobject) { struct nouveau_device *device = nv_device(parent); @@ -112,6 +113,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent, port->adapter.owner = THIS_MODULE; port->adapter.dev.parent = &device->pdev->dev; port->index = index; + port->func = func; i2c_set_adapdata(&port->adapter, i2c); if ( algo == &nouveau_i2c_bit_algo && diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c index 2ad18840fe63..860d5d2365da 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c @@ -91,12 +91,12 @@ nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_bit_algo, &port); + &nouveau_i2c_bit_algo, &nv04_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; - port->base.func = &nv04_i2c_func; port->drive = info->drive; port->sense = info->sense; return 0; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c index f501ae25dbb3..0c2655a03bb4 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c @@ -84,12 +84,12 @@ nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_bit_algo, &port); + &nouveau_i2c_bit_algo, &nv4e_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; - port->base.func = &nv4e_i2c_func; port->addr = 0x600800 + info->drive; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c index 378dfa324e5f..a8d67a287704 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c @@ -85,7 +85,8 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_bit_algo, &port); + &nouveau_i2c_bit_algo, &nv50_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; @@ -93,7 +94,6 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (info->drive >= nv50_i2c_addr_nr) return -EINVAL; - port->base.func = &nv50_i2c_func; port->state = 0x00000007; port->addr = nv50_i2c_addr[info->drive]; return 0; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c index 61b771670bfe..df6d3e4b68be 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c @@ -186,7 +186,8 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_bit_algo, &port); + &nouveau_i2c_bit_algo, &nv94_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; @@ -194,7 +195,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (info->drive >= nv50_i2c_addr_nr) return -EINVAL; - port->base.func = &nv94_i2c_func; port->state = 7; port->addr = nv50_i2c_addr[info->drive]; if (info->share != DCB_I2C_UNUSED) { @@ -221,12 +221,12 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_aux_algo, &port); 
+ &nouveau_i2c_aux_algo, &nv94_aux_func, + &port); *pobject = nv_object(port); if (ret) return ret; - port->base.func = &nv94_aux_func; port->addr = info->drive; if (info->share != DCB_I2C_UNUSED) { port->ctrl = 0x00e500 + (info->drive * 0x50); diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c index f761b8a610f1..29967d30f97c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c @@ -60,12 +60,12 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_i2c_port_create(parent, engine, oclass, index, - &nouveau_i2c_bit_algo, &port); + &nouveau_i2c_bit_algo, &nvd0_i2c_func, + &port); *pobject = nv_object(port); if (ret) return ret; - port->base.func = &nvd0_i2c_func; port->state = 0x00000007; port->addr = 0x00d014 + (info->drive * 0x20); if (info->share != DCB_I2C_UNUSED) { -- cgit v1.2.3 From 5087f51da805f53cba7366f70d596e7bde2a5486 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Fri, 23 Aug 2013 18:43:42 +0100 Subject: drm/nv50/disp: prevent false output detection on the original nv50 Commit ea9197cc323839ef3d5280c0453b2c622caa6bc7 effectively enabled the use of an improved DAC detection code, but introduced a regression on the original nv50 chipset, causing a ghost monitor to be detected. v2 (Ben Skeggs): the offending line was likely a thinko, removed it for all chipsets (tested nv50 and nve6 to cover entire range) and added some additional debugging. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=67382 Tested-by: Martin Peres Signed-off-by: Emil Velikov Cc: # 3.9+ Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c index f02fd9f443ff..a66b27c0fcab 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c @@ -49,18 +49,23 @@ int nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) { const u32 doff = (or * 0x800); - int load = -EINVAL; + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); + nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); mdelay(9); udelay(500); - nv_wr32(priv, 0x61a00c + doff, 0x80000000); - load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; - nv_wr32(priv, 0x61a00c + doff, 0x00000000); + loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000); + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - return load; + + nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval); + if (!(loadval & 0x80000000)) + return -ETIMEDOUT; + + return (loadval & 0x38000000) >> 27; } int -- cgit v1.2.3 From 4b31ebcf69a48d5d70cf26cea080bd0818fdd9af Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 4 Sep 2013 11:01:42 +1000 Subject: drm/nv50-/kms: assume analog display connected if load on any pin Fixes a VGA monitor with a dodgy red (in this case) pin not being detected. 
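In effect the test is relaxed from "all three R/G/B pins must report a load" to "any pin reporting a load counts as connected"; roughly (sketch of the detect logic, the one-line diff follows):

    /* 'load' is a 3-bit mask of per-pin load-detect results */
    if (ret || !load)                  /* no load on any pin at all */
            return connector_status_disconnected;
    return connector_status_connected; /* previously required load == 7 */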
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 9d2092a5ed38..f8e66c08b11a 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1583,7 +1583,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) load = 340; ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); - if (ret || load != 7) + if (ret || !load) return connector_status_disconnected; return connector_status_connected; -- cgit v1.2.3 From a27e56996687e79416d69a7e6dc26f9d8fe06059 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 28 Aug 2013 02:00:50 +0200 Subject: drm/nouveau: use MSI interrupts MSIs were only problematic on some old, broken chipsets. But now that we already see systems where PCI legacy interrupts are somewhat flaky, it's really time to move to MSIs. v2 (Ben Skeggs): blacklist BR02 boards Signed-off-by: Lucas Stach Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 1 + drivers/gpu/drm/nouveau/core/os.h | 1 + drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 24 +++++++++++++++++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index 9d2cd2006250..ce6569f365a7 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ -12,6 +12,7 @@ struct nouveau_mc_intr { struct nouveau_mc { struct nouveau_subdev base; const struct nouveau_mc_intr *intr_map; + bool use_msi; }; static inline struct nouveau_mc * diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h index d48683a82585..191e739f30d1 100644 --- a/drivers/gpu/drm/nouveau/core/os.h +++ b/drivers/gpu/drm/nouveau/core/os.h @@ -19,6 +19,7 @@ #include #include #include +#include #include diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 20f9a538746e..37712a6df923 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -23,7 +23,7 @@ */ #include -#include +#include static irqreturn_t nouveau_mc_intr(int irq, void *arg) @@ -47,6 +47,9 @@ nouveau_mc_intr(int irq, void *arg) map++; } + if (pmc->use_msi) + nv_wr08(pmc->base.base.parent, 0x00088068, 0xff); + if (intr) { nv_error(pmc, "unknown intr 0x%08x\n", stat); } @@ -81,6 +84,8 @@ _nouveau_mc_dtor(struct nouveau_object *object) struct nouveau_device *device = nv_device(object); struct nouveau_mc *pmc = (void *)object; free_irq(device->pdev->irq, pmc); + if (pmc->use_msi) + pci_disable_msi(device->pdev); nouveau_subdev_destroy(&pmc->base); } @@ -102,6 +107,23 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, pmc->intr_map = intr_map; + switch (device->pdev->device & 0x0ff0) { + case 0x00f0: /* BR02? */ + case 0x02e0: /* BR02? 
*/ + pmc->use_msi = false; + break; + default: + pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true); + if (pmc->use_msi) { + pmc->use_msi = pci_enable_msi(device->pdev) == 0; + if (pmc->use_msi) { + nv_info(pmc, "MSI interrupts enabled\n"); + nv_wr08(device, 0x00088068, 0xff); + } + } + break; + } + ret = request_irq(device->pdev->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau", pmc); if (ret < 0) -- cgit v1.2.3 From c072470f4e9abdde7cdf1c850b8826f32f9e79e3 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sat, 31 Aug 2013 01:58:50 +0200 Subject: drm/nouveau/bios/therm: handle vbioses with duplicate entries (mostly nva5) Some vbioses have extra useless entries after "the end" of the table. This is problematic since all of the vbioses I found with this issue redefine the pwm freq divider to insane levels (52750 Hz instead of 2500), thus breaking fan management. The first way to solve this mess would be to change the length of the table. The solution I chose was simply to avoid setting the pwm freq twice, as the other redefinitions are harmless with our current parser. Signed-off-by: Martin Peres Reported-by: Mariusz Bialonczyk Tested-by: Mariusz Bialonczyk Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/core/subdev/bios/therm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c index 22a20573ed1b..22ac6dbd6c8f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c @@ -184,7 +184,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios, cur_trip->fan_duty = value; break; case 0x26: - fan->pwm_freq = value; + if (!fan->pwm_freq) + fan->pwm_freq = value; break; case 0x3b: fan->bump_period = value; -- cgit v1.2.3 From c859074e7d804a254f318bb55ba1b39893247fc7 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 2 Sep 2013 16:31:31 +0200 Subject: drm/nouveau: fix command submission to use vmalloc for big allocations I was getting an order-4 allocation failure from kmalloc when testing a game after a few days of uptime with some suspend/resumes. For big allocations, vmalloc should be used instead.
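This is the usual kmalloc-with-vmalloc-fallback pattern, paired with a free helper that picks the matching deallocator. A minimal sketch with hypothetical helper names (current kernels provide kvmalloc()/kvfree() for the same purpose):

    static void *u_alloc(size_t size)
    {
            /* try the cheap, physically contiguous path first, but do not
             * warn on failure -- high-order allocations are expected to fail */
            void *mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!mem)
                    mem = vmalloc(size);    /* fall back for big buffers */
            return mem;
    }

    static void u_free(void *addr)
    {
            if (is_vmalloc_addr(addr))
                    vfree(addr);
            else
                    kfree(addr);
    }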
Signed-off-by: Maarten Lankhorst Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_gem.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 487242fb3fdc..f32b71238c03 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -579,18 +579,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, return 0; } +static inline void +u_free(void *addr) +{ + if (!is_vmalloc_addr(addr)) + kfree(addr); + else + vfree(addr); +} + static inline void * u_memcpya(uint64_t user, unsigned nmemb, unsigned size) { void *mem; void __user *userptr = (void __force __user *)(uintptr_t)user; - mem = kmalloc(nmemb * size, GFP_KERNEL); + size *= nmemb; + + mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!mem) + mem = vmalloc(size); if (!mem) return ERR_PTR(-ENOMEM); - if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) { - kfree(mem); + if (DRM_COPY_FROM_USER(mem, userptr, size)) { + u_free(mem); return ERR_PTR(-EFAULT); } @@ -676,7 +689,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); } - kfree(reloc); + u_free(reloc); return ret; } @@ -738,7 +751,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); if (IS_ERR(bo)) { - kfree(push); + u_free(push); return nouveau_abi16_put(abi16, PTR_ERR(bo)); } @@ -849,8 +862,8 @@ out: nouveau_fence_unref(&fence); out_prevalid: - kfree(bo); - kfree(push); + u_free(bo); + u_free(push); out_next: if (chan->dma.ib_max) { -- cgit v1.2.3 From ab62e7686172767f79be9d0d9beac92e22a5e6da Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 2 Sep 2013 19:01:23 +1000 Subject: drm/radeon: protect ACPI calls with CONFIG_ACPI Signed-off-by: Stephen Rothwell Acked-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/ci_dpm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 916630fdc796..3cce533397c6 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4208,6 +4208,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic pi->pspp_notify_required = false; if (target_link_speed > current_link_speed) { switch (target_link_speed) { +#ifdef CONFIG_ACPI case RADEON_PCIE_GEN3: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) break; @@ -4217,6 +4218,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; +#endif default: pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); break; @@ -4248,7 +4250,9 @@ static void ci_notify_link_speed_change_after_state_change(struct radeon_device (ci_get_current_pcie_speed(rdev) > 0)) return; +#ifdef CONFIG_ACPI radeon_acpi_pcie_performance_request(rdev, request, false); +#endif } } -- cgit v1.2.3 From 3b28802e37bb1ca1cab584f679c42e72a7e384f8 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Sun, 1 Sep 2013 15:23:04 +0200 Subject: drm/tda998x: BUG() on invalid audio format Suppress warning of unused-variables by adding a BUG()+return for invalid audio-formats. 
Cc: Rob Clark Signed-off-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/i2c/tda998x_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index c2bd711e86e9..b1f8fc69023f 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -596,6 +596,10 @@ tda998x_configure_audio(struct drm_encoder *encoder, cts_n = CTS_N_M(3) | CTS_N_K(3); ca_i2s = CA_I2S_CA_I2S(0); break; + + default: + BUG(); + return; } reg_write(encoder, REG_AIP_CLKSEL, clksel_aip); -- cgit v1.2.3 From a1bfacf4006a3bb410b0fa85e203f9249d2d35e9 Mon Sep 17 00:00:00 2001 From: Vikas Sajjan Date: Tue, 6 Aug 2013 17:22:04 +0530 Subject: drm/exynos: Add fallback option to get non physically contiguous memory for fb While trying to get the boot-logo up on an exynos5420 SMDK which has an eDP panel connected with resolution 2560x1600, the following error occurred even with IOMMU enabled: [0.880000] [drm:lowlevel_buffer_allocate] *ERROR* failed to allocate buffer. [0.890000] [drm] Initialized exynos 1.0.0 20110530 on minor 0 To address the cases where physically contiguous memory MAY NOT be a mandatory requirement for fb, the patch adds a feature to get non physically contiguous memory for fb if physically contiguous memory allocation fails and if IOMMU is supported. Signed-off-by: Vikas Sajjan Signed-off-by: Arun Kumar Reviewed-by: Rob Clark Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 8e60bd61137f..f82a1d4a0fcb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -165,8 +165,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; - /* 0 means to allocate physically continuous memory */ - exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); + exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size); + /* + * If physically contiguous memory allocation fails and if IOMMU is + * supported then try to get buffer from non physically contiguous + * memory area. + */ + if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) { + dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n"); + exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, + size); + } + if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); goto err_release_framebuffer; -- cgit v1.2.3 From b10d6350a5dfce9c4640e0974936452afd171a13 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 24 Jul 2013 15:44:30 +0900 Subject: drm/exynos: add runtime pm interfaces to g2d driver This patch makes the g2d power domain and clock be controlled through the runtime PM interfaces instead of managing each of them separately.
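For context, the runtime PM shape the driver moves to looks roughly like this (a sketch with hypothetical names, not the g2d diff itself): the gate clock is handled only in the runtime callbacks, while the data path simply brackets hardware access with pm_runtime_get_sync()/pm_runtime_put_sync(), which also powers the domain up and down:

    static int foo_runtime_suspend(struct device *dev)
    {
            struct foo_data *foo = dev_get_drvdata(dev);

            clk_disable_unprepare(foo->gate_clk);   /* gate the clock while idle */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            struct foo_data *foo = dev_get_drvdata(dev);

            return clk_prepare_enable(foo->gate_clk);       /* ungate before use */
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };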
Signed-off-by: Inki Dae Signed-off-by: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 40 +++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index eddea4941483..b31356e67e5e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -808,17 +808,8 @@ static void g2d_dma_start(struct g2d_data *g2d, int ret; ret = pm_runtime_get_sync(g2d->dev); - if (ret < 0) { - dev_warn(g2d->dev, "failed pm power on.\n"); - return; - } - - ret = clk_prepare_enable(g2d->gate_clk); - if (ret < 0) { - dev_warn(g2d->dev, "failed to enable clock.\n"); - pm_runtime_put_sync(g2d->dev); + if (ret < 0) return; - } writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); @@ -871,7 +862,6 @@ static void g2d_runqueue_worker(struct work_struct *work) runqueue_work); mutex_lock(&g2d->runqueue_mutex); - clk_disable_unprepare(g2d->gate_clk); pm_runtime_put_sync(g2d->dev); complete(&g2d->runqueue_node->complete); @@ -1524,7 +1514,33 @@ static int g2d_resume(struct device *dev) } #endif -static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); +#ifdef CONFIG_PM_RUNTIME +static int g2d_runtime_suspend(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); + + clk_disable_unprepare(g2d->gate_clk); + + return 0; +} + +static int g2d_runtime_resume(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(g2d->gate_clk); + if (ret < 0) + dev_warn(dev, "failed to enable clock.\n"); + + return ret; +} +#endif + +static const struct dev_pm_ops g2d_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) + SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) +}; #ifdef CONFIG_OF static const struct of_device_id exynos_g2d_match[] = { -- cgit v1.2.3 From e30655d06179aa91c0c4caa2b7a6c55f8f81a731 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 13 Aug 2013 00:46:40 +0100 Subject: drm/exynos: Add missing includes Ensure that all externally accessed functions are correctly prototyped when defined in each file by making sure the headers with the prototypes are included in the file with the definition.
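A tiny illustration of why this matters (hypothetical files, not part of the patch): when the .c file that defines a function also includes the header declaring it, the compiler can catch any mismatch between the prototype other files see and the definition itself.

    /* exynos_drm_foo.h (hypothetical) */
    int exynos_drm_foo_init(struct drm_device *dev);

    /* exynos_drm_foo.c -- including its own header lets the compiler
     * check this definition against the prototype used elsewhere */
    #include "exynos_drm_foo.h"

    int exynos_drm_foo_init(struct drm_device *dev)
    {
            return 0;
    }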
Signed-off-by: Mark Brown Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_connector.c | 1 + drivers/gpu/drm/exynos/exynos_drm_crtc.c | 1 + drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 1 + drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 1 + drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1 + drivers/gpu/drm/exynos/exynos_drm_g2d.c | 1 + drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1 + drivers/gpu/drm/exynos/exynos_drm_plane.c | 1 + drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1 + drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1 + 10 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 02a8bc5226ca..3f80673c12ec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -17,6 +17,7 @@ #include #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_connector.h" #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ drm_connector) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 14f5c1d34028..e6992d8d2046 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -15,6 +15,7 @@ #include #include +#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" #include "exynos_drm_plane.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index fd76449cf452..e80e0a807a4b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -11,6 +11,7 @@ #include #include +#include "exynos_drm_dmabuf.h" #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index f82a1d4a0fcb..eac6933125bc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -20,6 +20,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" +#include "exynos_drm_fbdev.h" #include "exynos_drm_gem.h" #include "exynos_drm_iommu.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6e047bd53e2f..a8ab4a456afa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -21,6 +21,7 @@ #include #include #include "regs-fimc.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" #include "exynos_drm_fimc.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index b31356e67e5e..5cec194b6cc5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -23,6 +23,7 @@ #include #include #include "exynos_drm_drv.h" +#include "exynos_drm_g2d.h" #include "exynos_drm_gem.h" #include "exynos_drm_iommu.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 90b8a1a5344c..e69d1d294629 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -20,6 +20,7 @@ #include #include #include "regs-gsc.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" #include "exynos_drm_gsc.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 6ee55e68e0a2..98eb1f709b13 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -16,6 +16,7 
@@ #include "exynos_drm_encoder.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" +#include "exynos_drm_plane.h" #define to_exynos_plane(x) container_of(x, struct exynos_plane, base) diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 49669aa24c45..54b7360bad30 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -21,6 +21,7 @@ #include #include "regs-rotator.h" #include "exynos_drm.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index c57c56519add..4400330e4449 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -23,6 +23,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_vidi.h" /* vidi has totally three virtual windows. */ #define WINDOWS_NR 3 -- cgit v1.2.3 From 319477f35eef46e3c1eeb155f17c5fb06ef85704 Mon Sep 17 00:00:00 2001 From: Chanho Park Date: Tue, 13 Aug 2013 14:12:56 +0900 Subject: drm/exynos: add device tree support for rotator The exynos4 platform has been dt-only since 3.10, so we should convert the driver data and ids to dt-based parsing methods. The rotator driver has a limit table that gives the size limits of the input picture. Each SoC has slightly different limit values from the others. For example, exynos4210's max_size of RGB888 is 16k x 16k, but the others have 8k x 8k. As another example, the exynos5250 requires the X/Y size to be a multiple of 2 pixels. Thus, we should keep different tables for each of them. This patch also includes descriptions of each node for the rotator and gives an example of how to bind it. Signed-off-by: Chanho Park Signed-off-by: Kyungmin Park Signed-off-by: Inki Dae --- .../devicetree/bindings/gpu/samsung-rotator.txt | 27 ++++++ drivers/gpu/drm/exynos/exynos_drm_rotator.c | 108 +++++++++++++++------ 2 files changed, 107 insertions(+), 28 deletions(-) create mode 100644 Documentation/devicetree/bindings/gpu/samsung-rotator.txt diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt new file mode 100644 index 000000000000..82cd1ed0be93 --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt @@ -0,0 +1,27 @@ +* Samsung Image Rotator + +Required properties: + - compatible : value should be one of the following: + (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210 + (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412 + (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250 + + - reg : Physical base address of the IP registers and length of memory + mapped region. + + - interrupts : Interrupt specifier for rotator interrupt, according to format + specific to interrupt parent. + + - clocks : Clock specifier for rotator clock, according to generic clock + bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt) + + - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
+ +Example: + rotator@12810000 { + compatible = "samsung,exynos4210-rotator"; + reg = <0x12810000 0x1000>; + interrupts = <0 83 0>; + clocks = <&clock 278>; + clock-names = "rotator"; + }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 54b7360bad30..a77cd5bb104c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -632,21 +632,98 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) return 0; } +static struct rot_limit_table rot_limit_tbl_4210 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_64K, + .max_h = SZ_64K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_16K, + .max_h = SZ_16K, + .align = 2, + }, +}; + +static struct rot_limit_table rot_limit_tbl_4x12 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_32K, + .max_h = SZ_32K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_8K, + .max_h = SZ_8K, + .align = 2, + }, +}; + +static struct rot_limit_table rot_limit_tbl_5250 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_32K, + .max_h = SZ_32K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_8K, + .max_h = SZ_8K, + .align = 1, + }, +}; + +static const struct of_device_id exynos_rotator_match[] = { + { + .compatible = "samsung,exynos4210-rotator", + .data = &rot_limit_tbl_4210, + }, + { + .compatible = "samsung,exynos4212-rotator", + .data = &rot_limit_tbl_4x12, + }, + { + .compatible = "samsung,exynos5250-rotator", + .data = &rot_limit_tbl_5250, + }, + {}, +}; + static int rotator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rot_context *rot; struct exynos_drm_ippdrv *ippdrv; + const struct of_device_id *match; int ret; + if (!dev->of_node) { + dev_err(dev, "cannot find of_node.\n"); + return -ENODEV; + } + rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); if (!rot) { dev_err(dev, "failed to allocate rot\n"); return -ENOMEM; } - rot->limit_tbl = (struct rot_limit_table *) - platform_get_device_id(pdev)->driver_data; + match = of_match_node(exynos_rotator_match, dev->of_node); + if (!match) { + dev_err(dev, "failed to match node\n"); + return -ENODEV; + } + rot->limit_tbl = (struct rot_limit_table *)match->data; rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rot->regs = devm_ioremap_resource(dev, rot->regs_res); @@ -718,31 +795,6 @@ static int rotator_remove(struct platform_device *pdev) return 0; } -static struct rot_limit_table rot_limit_tbl = { - .ycbcr420_2p = { - .min_w = 32, - .min_h = 32, - .max_w = SZ_32K, - .max_h = SZ_32K, - .align = 3, - }, - .rgb888 = { - .min_w = 8, - .min_h = 8, - .max_w = SZ_8K, - .max_h = SZ_8K, - .align = 2, - }, -}; - -static struct platform_device_id rotator_driver_ids[] = { - { - .name = "exynos-rot", - .driver_data = (unsigned long)&rot_limit_tbl, - }, - {}, -}; - static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -804,10 +856,10 @@ static const struct dev_pm_ops rotator_pm_ops = { struct platform_driver rotator_driver = { .probe = rotator_probe, .remove = rotator_remove, - .id_table = rotator_driver_ids, .driver = { .name = "exynos-rot", .owner = THIS_MODULE, .pm = &rotator_pm_ops, + .of_match_table = exynos_rotator_match, }, }; -- cgit v1.2.3 From ca7c6220bdddaf35dc13454cf85367c0f72f3e74 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Wed, 14 Aug 2013 16:38:00 +0530 Subject: drm/exynos: 
Remove redundant NULL check in exynos_drm_buf kfree handles null pointers. Hence this check is not necessary. Signed-off-by: Sachin Kamat Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_buf.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index b8ac06d92fbf..7f489e7330fe 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -161,11 +161,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, void exynos_drm_fini_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buffer) { - if (!buffer) { - DRM_DEBUG_KMS("buffer is null.\n"); - return; - } - kfree(buffer); buffer = NULL; } -- cgit v1.2.3 From 3f1c781d9354b3856ba7de64104659a2e2033fd4 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Wed, 14 Aug 2013 16:38:01 +0530 Subject: drm/exynos: Add missing of.h header include Add of.h explicitly for of_* APIs. Signed-off-by: Sachin Kamat Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_ddc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1 + drivers/gpu/drm/exynos/exynos_drm_fimd.c | 1 + drivers/gpu/drm/exynos/exynos_hdmi.c | 1 + drivers/gpu/drm/exynos/exynos_hdmiphy.c | 1 + drivers/gpu/drm/exynos/exynos_mixer.c | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c index 30ef41bcd7b8..d1e539b1649c 100644 --- a/drivers/gpu/drm/exynos/exynos_ddc.c +++ b/drivers/gpu/drm/exynos/exynos_ddc.c @@ -15,7 +15,7 @@ #include #include - +#include #include "exynos_drm_drv.h" #include "exynos_hdmi.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a8ab4a456afa..b047597d1f6b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 1c263dac3c1c..19328d03e46d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 2f5c6942c968..aecc601d540b 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c index 6e320ae9afed..6021996d3530 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c +++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c @@ -15,6 +15,7 @@ #include #include +#include #include "exynos_drm_drv.h" #include "exynos_hdmi.h" diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index c9a137caea41..f6cb1205a2fd 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -30,6 +30,7 @@ #include #include #include +#include #include -- cgit v1.2.3 From 38bb5253a95f2eb8cb765b7ab88aac686de6cb12 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Mon, 19 Aug 2013 19:04:55 +0900 Subject: drm/exynos: Remove redundant error messages kzalloc already has built-in error messages. Hence remove additional ones. 
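The rationale: a failing kzalloc() already triggers a page-allocator warning (with a backtrace) unless __GFP_NOWARN is passed, so a driver-side error print only duplicates information. A minimal before/after sketch (hypothetical allocation site):

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
            DRM_ERROR("failed to allocate ctx\n");  /* redundant: drop this */
            return -ENOMEM;
    }

    /* becomes simply */
    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
            return -ENOMEM;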
Signed-off-by: Sachin Kamat Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_buf.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_connector.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 1 - drivers/gpu/drm/exynos/exynos_drm_drv.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_encoder.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_fb.c | 8 ++------ drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 15 +++------------ drivers/gpu/drm/exynos/exynos_drm_gem.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 22 +++++----------------- drivers/gpu/drm/exynos/exynos_drm_plane.c | 4 +--- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 8 ++------ drivers/gpu/drm/exynos/exynos_hdmi.c | 16 ++++------------ drivers/gpu/drm/exynos/exynos_mixer.c | 8 ++------ 19 files changed, 30 insertions(+), 96 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 7f489e7330fe..3445a0f3a6b2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, DRM_DEBUG_KMS("desired size = 0x%x\n", size); buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) { - DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); + if (!buffer) return NULL; - } buffer->size = size; return buffer; diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 3f80673c12ec..de7c7b294d2c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -279,10 +279,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, int err; exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); - if (!exynos_connector) { - DRM_ERROR("failed to allocate connector\n"); + if (!exynos_connector) return NULL; - } connector = &exynos_connector->drm_connector; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e6992d8d2046..ebc01503d50e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -325,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) struct drm_crtc *crtc; exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); - if (!exynos_crtc) { - DRM_ERROR("failed to allocate exynos crtc\n"); + if (!exynos_crtc) return -ENOMEM; - } exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index e80e0a807a4b..59827cc5e770 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -231,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { - DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); ret = -ENOMEM; goto err_unmap_attach; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index df81d3c959b4..bb82ef78ca85 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ 
-47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) int nr; private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); - if (!private) { - DRM_ERROR("failed to allocate private\n"); + if (!private) return -ENOMEM; - } INIT_LIST_HEAD(&private->pageflip_event_list); dev->dev_private = (void *)private; diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index a99a033793bc..06f1b2a09da7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev, return NULL; exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); - if (!exynos_encoder) { - DRM_ERROR("failed to allocate encoder\n"); + if (!exynos_encoder) return NULL; - } exynos_encoder->dpms = DRM_MODE_DPMS_OFF; exynos_encoder->manager = manager; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index c2d149f0408a..ea39e0ef2ae4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev, } exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) { - DRM_ERROR("failed to allocate exynos drm framebuffer\n"); + if (!exynos_fb) return ERR_PTR(-ENOMEM); - } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; @@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, int i, ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) { - DRM_ERROR("failed to allocate exynos drm framebuffer\n"); + if (!exynos_fb) return ERR_PTR(-ENOMEM); - } obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); if (!obj) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index eac6933125bc..78e868bcf1ec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -248,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev) return 0; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); - if (!fbdev) { - DRM_ERROR("failed to allocate drm fbdev.\n"); + if (!fbdev) return -ENOMEM; - } private->fb_helper = helper = &fbdev->drm_fb_helper; helper->funcs = &exynos_drm_fb_helper_funcs; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index b047597d1f6b..8adfc8f1e08f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1345,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->writeback = 1; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 19328d03e46d..b8aa8fee8201 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -890,10 +890,8 @@ static int fimd_probe(struct platform_device *pdev) if (dev->of_node) { pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - DRM_ERROR("memory allocation for pdata failed\n"); + if (!pdata) return -ENOMEM; - } ret = of_get_fb_videomode(dev->of_node, 
&pdata->panel.timing, OF_USE_NATIVE_MODE); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 5cec194b6cc5..0b8b6e43bbd2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -447,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, } g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); - if (!g2d_userptr) { - DRM_ERROR("failed to allocate g2d_userptr.\n"); + if (!g2d_userptr) return ERR_PTR(-ENOMEM); - } atomic_set(&g2d_userptr->refcount, 1); @@ -500,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) { - DRM_ERROR("failed to allocate sg table.\n"); ret = -ENOMEM; goto err_free_userptr; } @@ -1087,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, e = kzalloc(sizeof(*node->event), GFP_KERNEL); if (!e) { - dev_err(dev, "failed to allocate event\n"); - spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -1318,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev, struct exynos_drm_g2d_private *g2d_priv; g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); - if (!g2d_priv) { - dev_err(dev, "failed to allocate g2d private data\n"); + if (!g2d_priv) return -ENOMEM; - } g2d_priv->dev = dev; file_priv->g2d_priv = g2d_priv; @@ -1377,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev) int ret; g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); - if (!g2d) { - dev_err(dev, "failed to allocate driver data\n"); + if (!g2d) return -ENOMEM; - } g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", sizeof(struct g2d_runqueue_node), 0, 0, NULL); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f3c6f40666e1..862f1d9a2ecb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -191,10 +191,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, int ret; exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); - if (!exynos_gem_obj) { - DRM_ERROR("failed to allocate exynos gem object\n"); + if (!exynos_gem_obj) return NULL; - } exynos_gem_obj->size = size; obj = &exynos_gem_obj->base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index e69d1d294629..cd6aebd53bd0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1338,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->writeback = 1; diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 8d3bc01d6834..8548b974bd59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev) struct drm_hdmi_context *ctx; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - DRM_LOG_KMS("failed to alloc common hdmi context.\n"); + if (!ctx) return -ENOMEM; - } subdrv = &ctx->subdrv; diff --git 
a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index d2b6ab4def93..824e0705c8d3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) struct drm_exynos_ipp_cmd_work *cmd_work; cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); - if (!cmd_work) { - DRM_ERROR("failed to alloc cmd_work.\n"); + if (!cmd_work) return ERR_PTR(-ENOMEM); - } INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); @@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) struct drm_exynos_ipp_event_work *event_work; event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); - if (!event_work) { - DRM_ERROR("failed to alloc event_work.\n"); + if (!event_work) return ERR_PTR(-ENOMEM); - } INIT_WORK((struct work_struct *)event_work, ipp_sched_event); @@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, /* allocate command node */ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); - if (!c_node) { - DRM_ERROR("failed to allocate map node.\n"); + if (!c_node) return -ENOMEM; - } /* create property id */ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, @@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node mutex_lock(&c_node->mem_lock); m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); - if (!m_node) { - DRM_ERROR("failed to allocate queue node.\n"); + if (!m_node) goto err_unlock; - } /* clear base address for error handling */ memset(&buf_info, 0x0, sizeof(buf_info)); @@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev, DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) { - DRM_ERROR("failed to allocate event.\n"); spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, struct exynos_drm_ipp_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - DRM_ERROR("failed to allocate priv.\n"); + if (!priv) return -ENOMEM; - } priv->dev = dev; file_priv->ipp_priv = priv; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 98eb1f709b13..fcb0652e77d0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -265,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev, int err; exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); - if (!exynos_plane) { - DRM_ERROR("failed to allocate plane\n"); + if (!exynos_plane) return NULL; - } err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, &exynos_plane_funcs, formats, ARRAY_SIZE(formats), diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index a77cd5bb104c..7b901688defa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -472,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | @@ -713,10 +711,8 
@@ static int rotator_probe(struct platform_device *pdev) } rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); - if (!rot) { - dev_err(dev, "failed to allocate rot\n"); + if (!rot) return -ENOMEM; - } match = of_match_node(exynos_rotator_match, dev->of_node); if (!match) { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index aecc601d540b..8ea07a106df2 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1825,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata) res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); - if (!res->regul_bulk) { - DRM_ERROR("failed to get memory for regulators\n"); + if (!res->regul_bulk) goto fail; - } for (i = 0; i < ARRAY_SIZE(supply); ++i) { res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; @@ -1869,10 +1867,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata u32 value; pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); - if (!pd) { - DRM_ERROR("memory allocation for pdata failed\n"); + if (!pd) goto err_data; - } if (!of_find_property(np, "hpd-gpio", &value)) { DRM_ERROR("no hpd gpio property found\n"); @@ -1952,17 +1948,13 @@ static int hdmi_probe(struct platform_device *pdev) drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); - if (!drm_hdmi_ctx) { - DRM_ERROR("failed to allocate common hdmi context.\n"); + if (!drm_hdmi_ctx) return -ENOMEM; - } hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); - if (!hdata) { - DRM_ERROR("out of memory\n"); + if (!hdata) return -ENOMEM; - } mutex_init(&hdata->hdmi_mutex); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index f6cb1205a2fd..63bc5f92fbb3 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1186,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev) drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); - if (!drm_hdmi_ctx) { - DRM_ERROR("failed to allocate common hdmi context.\n"); + if (!drm_hdmi_ctx) return -ENOMEM; - } ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - DRM_ERROR("failed to alloc mixer context.\n"); + if (!ctx) return -ENOMEM; - } mutex_init(&ctx->mixer_mutex); -- cgit v1.2.3 From 4db7fcdf59551d9d0a9fe59bd919a32feae925b2 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Wed, 14 Aug 2013 16:38:03 +0530 Subject: drm/exynos: Add NULL pointer check devm_kzalloc can fail. Hence check the pointer to avoid NULL pointer dereferencing. 
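
Condensed shape of the change, for illustration only: allocate, check, and unwind anything acquired earlier. bar_setup()/bar_teardown() are placeholder names; the actual patch below releases the IOMMU mapping in its error path.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* Placeholder for releasing whatever was acquired before the allocation,
	 * e.g. arm_iommu_release_mapping() in the actual driver. */
	static void bar_teardown(void *mapping)
	{
	}

	static int bar_setup(struct device *dev, void *mapping)
	{
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
		if (!dev->dma_parms) {
			bar_teardown(mapping);
			return -ENOMEM;
		}

		dma_set_max_seg_size(dev, 0xffffffffu);
		return 0;
	}
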
Signed-off-by: Sachin Kamat Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_iommu.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c index 3799d5c2b5df..fb8db0378274 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev) dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + goto error; + dma_set_max_seg_size(dev, 0xffffffffu); dev->archdata.mapping = mapping; return 0; +error: + arm_iommu_release_mapping(mapping); + return -ENOMEM; } /* @@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, sizeof(*subdrv_dev->dma_parms), GFP_KERNEL); + if (!subdrv_dev->dma_parms) + return -ENOMEM; + dma_set_max_seg_size(subdrv_dev, 0xffffffffu); ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); -- cgit v1.2.3 From a4f38a80f62bb613525563860cbca87f583081b5 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Tue, 20 Aug 2013 13:51:02 +0900 Subject: drm/exynos: fix fimd pixel format setting This patch fixes wrong pixel format setting. A pixel format is decided according to bpp and depth, or user-requested format but fimd driver considered only bpp value to decide a proper pixel format. So this patch makes a proper pixel format to be set according to drm_framebuffer's pixel_format which is set by addfb with bpp and depth, or addfb2 with user-requested format. Signed-off-by: Inki Dae Reviewed-by: Tomasz Figa Signed-off-by: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 34 ++++++++++++-------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index b8aa8fee8201..13d5afbc0dfd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -91,6 +91,7 @@ struct fimd_win_data { unsigned int fb_width; unsigned int fb_height; unsigned int bpp; + unsigned int pixel_format; dma_addr_t dma_addr; unsigned int buf_offsize; unsigned int line_size; /* bytes */ @@ -397,6 +398,7 @@ static void fimd_win_mode_set(struct device *dev, win_data->fb_height = overlay->fb_height; win_data->dma_addr = overlay->dma_addr[0] + offset; win_data->bpp = overlay->bpp; + win_data->pixel_format = overlay->pixel_format; win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * (overlay->bpp >> 3); win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); @@ -418,39 +420,29 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win) val = WINCONx_ENWIN; - switch (win_data->bpp) { - case 1: - val |= WINCON0_BPPMODE_1BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_4WORD; - break; - case 2: - val |= WINCON0_BPPMODE_2BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_8WORD; - break; - case 4: - val |= WINCON0_BPPMODE_4BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_8WORD; - break; - case 8: + switch (win_data->pixel_format) { + case DRM_FORMAT_C8: val |= WINCON0_BPPMODE_8BPP_PALETTE; val |= WINCONx_BURSTLEN_8WORD; val |= WINCONx_BYTSWP; break; - case 16: + case DRM_FORMAT_XRGB1555: + val |= WINCON0_BPPMODE_16BPP_1555; + val |= WINCONx_HAWSWP; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_RGB565: val |= WINCON0_BPPMODE_16BPP_565; val |= WINCONx_HAWSWP; val |= 
WINCONx_BURSTLEN_16WORD; break; - case 24: + case DRM_FORMAT_XRGB8888: val |= WINCON0_BPPMODE_24BPP_888; val |= WINCONx_WSWP; val |= WINCONx_BURSTLEN_16WORD; break; - case 32: - val |= WINCON1_BPPMODE_28BPP_A4888 + case DRM_FORMAT_ARGB8888: + val |= WINCON1_BPPMODE_25BPP_A1888 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; val |= WINCONx_WSWP; val |= WINCONx_BURSTLEN_16WORD; -- cgit v1.2.3 From 5cc4621a17b1e63738658a93b9a5667c876a22e4 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Tue, 20 Aug 2013 14:28:56 +0900 Subject: drm/exynos: check a pixel format to a particular window layer This patch checks if a requested window supports alpha channel or not. In case of s3c64xx, window 0 doesn't support alpha channel so if the request pixel format is ARGB8888 then change it to XRGB8888. Signed-off-by: Inki Dae Reviewed-by: Tomasz Figa Signed-off-by: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 13d5afbc0dfd..f8889d28382d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -66,11 +66,13 @@ struct fimd_driver_data { unsigned int has_shadowcon:1; unsigned int has_clksel:1; + unsigned int has_limited_fmt:1; }; static struct fimd_driver_data s3c64xx_fimd_driver_data = { .timing_base = 0x0, .has_clksel = 1, + .has_limited_fmt = 1, }; static struct fimd_driver_data exynos4_fimd_driver_data = { @@ -420,6 +422,15 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win) val = WINCONx_ENWIN; + /* + * In case of s3c64xx, window 0 doesn't support alpha channel. + * So the request format is ARGB8888 then change it to XRGB8888. + */ + if (ctx->driver_data->has_limited_fmt && !win) { + if (win_data->pixel_format == DRM_FORMAT_ARGB8888) + win_data->pixel_format = DRM_FORMAT_XRGB8888; + } + switch (win_data->pixel_format) { case DRM_FORMAT_C8: val |= WINCON0_BPPMODE_8BPP_PALETTE; -- cgit v1.2.3 From 111e6055d4e0d35c6a4b6cd37d7bb00a88eaffb4 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Wed, 21 Aug 2013 16:22:01 +0200 Subject: drm/exynos: fimd: replace struct fb_videomode with videomode The patch replaces all occurrences of struct fb_videomode by more accurate struct videomode. The change allows to remove mode conversion function and simplifies clock divider calculation. Clock configuration is moved to separate function. 
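
For reference, struct videomode carries timings in the form the DRM helpers expect, so the driver-local conversion can be dropped in favor of drm_display_mode_from_videomode(), as the hunk below does. A standalone sketch with made-up 1024x768 timings; header locations reflect current kernels and may differ in older trees.

	#include <video/videomode.h>
	#include <drm/drm_modes.h>

	/* Hypothetical panel timings, expressed directly as a videomode. */
	static const struct videomode example_vm = {
		.pixelclock	= 65000000,	/* Hz */
		.hactive	= 1024,
		.hfront_porch	= 24,
		.hsync_len	= 136,
		.hback_porch	= 160,
		.vactive	= 768,
		.vfront_porch	= 3,
		.vsync_len	= 6,
		.vback_porch	= 29,
	};

	static void example_fill_mode(struct drm_display_mode *mode)
	{
		/* Replaces a hand-rolled conversion helper: the DRM core derives
		 * hsync/vsync start, end and totals from the porches and converts
		 * the pixel clock from Hz to kHz. */
		drm_display_mode_from_videomode(&example_vm, mode);
	}
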
Signed-off-by: Andrzej Hajda Signed-off-by: Kyungmin Park Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_connector.c | 33 +------ drivers/gpu/drm/exynos/exynos_drm_fimd.c | 131 +++++++++++++------------- include/drm/exynos_drm.h | 3 +- 3 files changed, 70 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index de7c7b294d2c..e082efb2fece 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -29,35 +29,6 @@ struct exynos_drm_connector { uint32_t dpms; }; -/* convert exynos_video_timings to drm_display_mode */ -static inline void -convert_to_display_mode(struct drm_display_mode *mode, - struct exynos_drm_panel_info *panel) -{ - struct fb_videomode *timing = &panel->timing; - - mode->clock = timing->pixclock / 1000; - mode->vrefresh = timing->refresh; - - mode->hdisplay = timing->xres; - mode->hsync_start = mode->hdisplay + timing->right_margin; - mode->hsync_end = mode->hsync_start + timing->hsync_len; - mode->htotal = mode->hsync_end + timing->left_margin; - - mode->vdisplay = timing->yres; - mode->vsync_start = mode->vdisplay + timing->lower_margin; - mode->vsync_end = mode->vsync_start + timing->vsync_len; - mode->vtotal = mode->vsync_end + timing->upper_margin; - mode->width_mm = panel->width_mm; - mode->height_mm = panel->height_mm; - - if (timing->vmode & FB_VMODE_INTERLACED) - mode->flags |= DRM_MODE_FLAG_INTERLACE; - - if (timing->vmode & FB_VMODE_DOUBLE) - mode->flags |= DRM_MODE_FLAG_DBLSCAN; -} - static int exynos_drm_connector_get_modes(struct drm_connector *connector) { struct exynos_drm_connector *exynos_connector = @@ -112,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) return 0; } - convert_to_display_mode(mode, panel); + drm_display_mode_from_videomode(&panel->vm, mode); + mode->width_mm = panel->width_mm; + mode->height_mm = panel->height_mm; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index f8889d28382d..a183ea7dbd55 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -21,6 +21,7 @@ #include #include