author    | Thomas Gleixner <tglx@linutronix.de>  | 2011-06-15 11:02:21 +0200
committer | Clark Williams <williams@redhat.com>  | 2011-12-28 16:25:46 -0600
commit    | e30d63537b498f036d6bbbcd704d0d0fd0fcb6f9 (patch)
tree      | 1a22341b086c23f9d6d788c81abc60779fedb197
parent    | 495f573c85270b51a11b8e78190a282faee645d7 (diff)
lglocks-rt.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | include/linux/lglock.h | 100
1 file changed, 100 insertions, 0 deletions
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20b..433f12d47cb3 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -70,6 +70,9 @@
 extern void name##_global_lock_online(void); \
 extern void name##_global_unlock_online(void); \
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #define DEFINE_LGLOCK(name) \
 \
 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
@@ -169,4 +172,101 @@
 	preempt_enable(); \
 } \
 EXPORT_SYMBOL(name##_global_unlock);
+
+#else /* !PREEMPT_RT_FULL */
+#define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
+ void name##_lock_init(void) { \
+	int i; \
+	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+	for_each_possible_cpu(i) { \
+		struct rt_mutex *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		rt_mutex_init(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+ void name##_local_lock(void) { \
+	struct rt_mutex *lock; \
+	migrate_disable(); \
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+	lock = &__get_cpu_var(name##_lock); \
+	__rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock); \
+ \
+ void name##_local_unlock(void) { \
+	struct rt_mutex *lock; \
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+	lock = &__get_cpu_var(name##_lock); \
+	__rt_spin_unlock(lock); \
+	migrate_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock); \
+ \
+ void name##_local_lock_cpu(int cpu) { \
+	struct rt_mutex *lock; \
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+	lock = &per_cpu(name##_lock, cpu); \
+	__rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock_cpu); \
+ \
+ void name##_local_unlock_cpu(int cpu) { \
+	struct rt_mutex *lock; \
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+	lock = &per_cpu(name##_lock, cpu); \
+	__rt_spin_unlock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock_cpu); \
+ \
+ void name##_global_lock_online(void) { \
+	int i; \
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+	for_each_online_cpu(i) { \
+		struct rt_mutex *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		__rt_spin_lock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_lock_online); \
+ \
+ void name##_global_unlock_online(void) { \
+	int i; \
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+	for_each_online_cpu(i) { \
+		struct rt_mutex *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		__rt_spin_unlock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+ void name##_global_lock(void) { \
+	int i; \
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+	for_each_possible_cpu(i) { \
+		struct rt_mutex *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		__rt_spin_lock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_lock); \
+ \
+ void name##_global_unlock(void) { \
+	int i; \
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+	for_each_possible_cpu(i) { \
+		struct rt_mutex *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		__rt_spin_unlock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif /* PREEMPT_RT_FULL */
+
 #endif
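
For orientation, the indented sketch below is not part of the commit; it shows how a caller might use the per-name functions that DECLARE_LGLOCK()/DEFINE_LGLOCK() generate, calling the generated functions directly for clarity. The lock name my_lglock and the per-CPU counter are hypothetical. With this patch, on PREEMPT_RT_FULL the same call sites end up taking per-CPU rt_mutexes with migrate_disable()/migrate_enable() around the local critical section instead of arch spinlocks with preemption disabled, so callers need no changes.

    /*
     * Illustrative only: a hypothetical user of the lglock API whose
     * semantics this patch preserves under PREEMPT_RT_FULL.
     */
    #include <linux/lglock.h>
    #include <linux/percpu.h>

    DECLARE_LGLOCK(my_lglock);
    DEFINE_LGLOCK(my_lglock);

    static DEFINE_PER_CPU(unsigned long, my_counter);

    static void my_lglock_setup(void)
    {
    	my_lglock_lock_init();	/* initialise every per-CPU lock once */
    }

    /* Fast path: take only this CPU's lock and touch only this CPU's data. */
    static void my_local_inc(void)
    {
    	my_lglock_local_lock();
    	this_cpu_inc(my_counter);
    	my_lglock_local_unlock();
    }

    /* Slow path: take every CPU's lock to read a consistent global view. */
    static unsigned long my_global_sum(void)
    {
    	unsigned long sum = 0;
    	int cpu;

    	my_lglock_global_lock();
    	for_each_possible_cpu(cpu)
    		sum += per_cpu(my_counter, cpu);
    	my_lglock_global_unlock();
    	return sum;
    }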