| field | value | date |
|---|---|---|
| author | Nicolai Hähnle <Nicolai.Haehnle@amd.com> | 2016-12-21 19:46:32 +0100 |
| committer | Ingo Molnar <mingo@kernel.org> | 2017-01-14 11:14:40 +0100 |
| commit | ea9e0fb8fe1bdfca81bd76052a5cce70bb053430 | |
| tree | 067f2df58cab95eb1096fcc7eac74b70c8631dae | |
| parent | 3822da3ed0676e01f83fe0518c333c8e9ba249bf | |
locking/ww_mutex: Set use_ww_ctx even when locking without a context
We will add a new field to struct mutex_waiter.  This field must be
initialized for all waiters if any waiter uses the use_ww_ctx path.
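
For context, here is a sketch of roughly what struct mutex_waiter looked like in this era (an editor's reconstruction, not part of the patch); the commit does not name the new field, so it is only hinted at in a comment:

```c
/*
 * Roughly include/linux/mutex.h at the time (reconstruction, not from
 * this patch). A later patch in the series grows this struct by one
 * field that every waiter must initialize once any waiter takes the
 * use_ww_ctx path; the commit message does not name it, so it is
 * omitted here.
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
};
```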
So there is a trade-off: either keep ww_mutex locking without a context
on the faster non-use_ww_ctx path, at the cost of adding the
initialization to all mutex locks (including non-ww_mutexes), or avoid
the additional cost for non-ww_mutex locks, at the cost of adding
additional checks to the use_ww_ctx path.
We take the latter choice.  It may be worth eliminating the users of
ww_mutex_lock(lock, NULL), but there are a lot of them.
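
For illustration only (not part of the patch), a minimal sketch of the two caller patterns this change unifies, using the existing ww_mutex API; the example_* identifiers are made up:

```c
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);	/* made-up name */
static struct ww_mutex example_lock;		/* ww_mutex_init()ed during setup */

/* Locking with an acquire context: the use_ww_ctx path proper. */
static void example_lock_with_ctx(void)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &example_ww_class);
	/* Single lock, so no -EDEADLK possible; error handling elided. */
	ww_mutex_lock(&example_lock, &ctx);
	/* ... critical section ... */
	ww_mutex_unlock(&example_lock);
	ww_acquire_fini(&ctx);
}

/*
 * Locking without a context: before this patch this was plain
 * mutex_lock(&lock->base); after it, this too goes through
 * __ww_mutex_lock(), guarded by the new ww_ctx != NULL checks.
 */
static void example_lock_without_ctx(void)
{
	ww_mutex_lock(&example_lock, NULL);
	/* ... critical section ... */
	ww_mutex_unlock(&example_lock);
}
```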
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-5-git-send-email-nhaehnle@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/ww_mutex.h | 11 |
| -rw-r--r-- | kernel/locking/mutex.c | 29 |

2 files changed, 19 insertions(+), 21 deletions(-)
```diff
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 7b0066814fa0..5f2e8379baff 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -222,11 +222,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
  */
 static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock(lock, ctx);
-
-	mutex_lock(&lock->base);
-	return 0;
+	return __ww_mutex_lock(lock, ctx);
 }
 
 /**
@@ -262,10 +258,7 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
 static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
 							   struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock_interruptible(lock, ctx);
-	else
-		return mutex_lock_interruptible(&lock->base);
+	return __ww_mutex_lock_interruptible(lock, ctx);
 }
 
 /**
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 9ad03b9a5f7f..44a64c0a851a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -469,7 +469,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	for (;;) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -629,8 +629,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
-	if (use_ww_ctx) {
-		ww = container_of(lock, struct ww_mutex, base);
+	ww = container_of(lock, struct ww_mutex, base);
+
+	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -642,7 +643,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx)
+		if (use_ww_ctx && ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -688,7 +689,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -728,7 +729,7 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx)
+	if (use_ww_ctx && ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
@@ -816,8 +817,9 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
-	if (!ret && ctx->acquired > 1)
+				   0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				   ctx, 1);
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -831,9 +833,10 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
+				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				  ctx, 1);
 
-	if (!ret && ctx->acquired > 1)
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -1021,7 +1024,8 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
 
@@ -1035,7 +1039,8 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
 
```
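A note on the hoisted container_of() in __mutex_lock_common(): computing ww unconditionally is safe even for plain mutexes, because container_of() is only address arithmetic and ww is never dereferenced unless use_ww_ctx && ww_ctx holds. Since callers pass use_ww_ctx as a constant, the compiler should still be able to discard the ww_mutex branches entirely for regular mutex locks.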
