author     Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 16:32:36 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 20:20:02 +0100
commit     b5add9591ca5b869b8c9c559e16ccab8a8ba4727
tree       4beced6e04330424accd83ef57ab79c798541209
parent     5d723d7afd320e687ebb59f7ac741b0ab02d77e8
drm/i915: Make fb_tracking.lock a spinlock
We only need a very lightweight mechanism here as the locking is only used
for co-ordinating a bitfield.

v2: Move the cheap unlikely tests into the caller
v3: Move the kerneldoc into the header (now separated out into
    intel_frontbuffer.h for better kerneldoc and readability)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-20-git-send-email-chris@chris-wilson.co.uk
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            2
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c  73
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.h  50
4 files changed, 71 insertions, 56 deletions
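
The change boils down to a common kernel pattern: the lock protects nothing but a pair of bitfields, so a spinlock suffices, and the cheap "anything to do?" test is hoisted into a static inline wrapper in the header so the out-of-line function is only entered when there is real work. A minimal, self-contained sketch of that pattern follows; the fb_tracker and fb_track_invalidate names are illustrative, not the driver's.

#include <linux/spinlock.h>

struct fb_tracker {
	spinlock_t lock;	/* protects only the two bitfields below */
	unsigned int busy_bits;
	unsigned int flip_bits;
};

/* Out-of-line: the short critical section updating the bitfields. */
void __fb_track_invalidate(struct fb_tracker *t, unsigned int bits)
{
	spin_lock(&t->lock);
	t->busy_bits |= bits;
	t->flip_bits &= ~bits;
	spin_unlock(&t->lock);
}

/* Inline wrapper: the cheap unlikely test moved into the caller (v2). */
static inline void fb_track_invalidate(struct fb_tracker *t, unsigned int bits)
{
	if (!bits)
		return;

	__fb_track_invalidate(t, bits);
}
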
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ba99057c317..b26f5b1f466b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1669,7 +1669,7 @@ struct intel_pipe_crc {
};
struct i915_frontbuffer_tracking {
- struct mutex lock;
+ spinlock_t lock;
/*
* Tracking bits for delayed frontbuffer flushing du to gpu activity or
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1960705c50c2..68110b9d98b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4455,7 +4455,7 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- mutex_init(&dev_priv->fb_tracking.lock);
+ spin_lock_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_load_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index b83a70016b12..f15486aee7f8 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -66,35 +66,19 @@
#include "intel_frontbuffer.h"
#include "i915_drv.h"
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (!obj->frontbuffer_bits)
- return;
-
if (origin == ORIGIN_CS) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits
- |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits
- &= ~obj->frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+ spin_unlock(&dev_priv->fb_tracking.lock);
}
intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -121,9 +105,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Delay flushing when rings are still busy.*/
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -133,18 +117,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -152,21 +127,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (!obj->frontbuffer_bits)
- return;
-
frontbuffer_bits = obj->frontbuffer_bits;
if (retire) {
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
}
- intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+ if (frontbuffer_bits)
+ intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}
/**
@@ -186,11 +158,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
intel_psr_single_frame_update(dev, frontbuffer_bits);
}
@@ -210,13 +182,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+ if (frontbuffer_bits)
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
/**
@@ -235,10 +208,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 3d0061454bd5..60a0ec179108 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -28,15 +28,57 @@ struct drm_device;
struct drm_i915_private;
struct drm_i915_gem_object;
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
- enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (!obj->frontbuffer_bits)
+ return;
+
+ __intel_fb_obj_invalidate(obj, origin);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin)
+{
+ if (!obj->frontbuffer_bits)
+ return;
+
+ __intel_fb_obj_flush(obj, retire, origin);
+}
#endif /* __INTEL_FRONTBUFFER_H__ */
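
For reference, a hypothetical caller of the new inline wrappers, following the kerneldoc above: invalidate when rendering to a frontbuffer object starts, flush once it has completed. Only the intel_fb_obj_* calls and ORIGIN_CS come from this patch; the surrounding function and submit_rendering() are illustrative placeholders.

/* Hypothetical caller -- illustrative only, not part of the patch. */
static void render_to_object(struct drm_i915_gem_object *obj)
{
	/*
	 * Rendering is about to touch obj, so frontbuffer caching (fbc,
	 * DRRS, panel self refresh) must be invalidated.  For objects with
	 * no frontbuffer bits set this returns before taking the spinlock.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CS);

	submit_rendering(obj);		/* illustrative placeholder */

	/* Rendering has completed; retire unblocks any delayed flush. */
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}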