Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/local_lock_internal.h   3
-rw-r--r--  include/linux/mutex.h                  1
-rw-r--r--  include/linux/rwlock.h                 3
-rw-r--r--  include/linux/rwlock_rt.h              1
-rw-r--r--  include/linux/rwsem.h                  2
-rw-r--r--  include/linux/seqlock.h                1
-rw-r--r--  include/linux/spinlock.h               5
-rw-r--r--  include/linux/spinlock_rt.h            1
-rw-r--r--  include/linux/ww_mutex.h               1
9 files changed, 2 insertions(+), 16 deletions(-)
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index ed2f3fb4c360..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -87,13 +87,11 @@ do { \
0, LD_WAIT_CONFIG, LD_WAIT_INV, \
LD_LOCK_PERCPU); \
local_lock_debug_init(lock); \
- __assume_ctx_lock(lock); \
} while (0)
#define __local_trylock_init(lock) \
do { \
__local_lock_init((local_lock_t *)lock); \
- __assume_ctx_lock(lock); \
} while (0)
#define __spinlock_nested_bh_init(lock) \
@@ -105,7 +103,6 @@ do { \
0, LD_WAIT_CONFIG, LD_WAIT_INV, \
LD_LOCK_NORMAL); \
local_lock_debug_init(lock); \
- __assume_ctx_lock(lock); \
} while (0)
#define __local_lock_acquire(lock) \
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 6b12009351d2..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -62,7 +62,6 @@ do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
- __assume_ctx_lock(mutex); \
} while (0)
/**
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 65a5b55e1bcd..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -22,11 +22,10 @@ do { \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key); \
- __assume_ctx_lock(lock); \
} while (0)
#else
# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 37b387dcab21..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -22,7 +22,6 @@ do { \
\
init_rwbase_rt(&(rwl)->rwbase); \
__rt_rwlock_init(rwl, #rwl, &__key); \
- __assume_ctx_lock(rwl); \
} while (0)
extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ea1bbdb57a47..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -121,7 +121,6 @@ do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
- __assume_ctx_lock(sem); \
} while (0)
/*
@@ -175,7 +174,6 @@ do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
- __assume_ctx_lock(sem); \
} while (0)
static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 077c8d5b2afd..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -817,7 +817,6 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
do { \
spin_lock_init(&(sl)->lock); \
seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
- __assume_ctx_lock(sl); \
} while (0)
/**
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7b11991c742a..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -106,12 +106,11 @@ do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
- __assume_ctx_lock(lock); \
} while (0)
#else
# define raw_spin_lock_init(lock) \
- do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
@@ -324,7 +323,6 @@ do { \
\
__raw_spin_lock_init(spinlock_check(lock), \
#lock, &__key, LD_WAIT_CONFIG); \
- __assume_ctx_lock(lock); \
} while (0)
#else
@@ -333,7 +331,6 @@ do { \
do { \
spinlock_check(_lock); \
*(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
- __assume_ctx_lock(_lock); \
} while (0)
#endif
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 0a585768358f..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,7 +20,6 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
do { \
rt_mutex_base_init(&(slock)->lock); \
__rt_spin_lock_init(slock, name, key, percpu); \
- __assume_ctx_lock(slock); \
} while (0)
#define _spin_lock_init(slock, percpu) \
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 58e959ee10e9..c47d4b9b88b3 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -107,7 +107,6 @@ context_lock_struct(ww_acquire_ctx) {
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
- __assumes_ctx_lock(lock)
{
ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;