author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-22 19:25:09 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-22 19:25:09 -0800
commit		3b5d8510b94a95e493e8c4951ffc3d1cf6a6792d (patch)
tree		4493367715c7a15e9057e6c72fa3c3edfd0605ad /kernel
parent		c47f39e3b75e1138823984ad5079547c7a41b726 (diff)
parent		41ef8f826692c8f65882bec0a8211bd4d1d2d19a (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:
"The biggest change is the rwsem lock-steal improvements, both to the
assembly optimized and the spinlock based variants.
The other notable change is the clean up of the seqlock implementation
to be based on the seqcount infrastructure.
The rest is assorted smaller debuggability, cleanup and continued -rt
locking changes."
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
rwsem-spinlock: Implement writer lock-stealing for better scalability
futex: Revert "futex: Mark get_robust_list as deprecated"
generic: Use raw local irq variant for generic cmpxchg
lockdep: Selftest: convert spinlock to raw spinlock
seqlock: Use seqcount infrastructure
seqlock: Remove unused functions
ntp: Make ntp_lock raw
intel_idle: Convert i7300_idle_lock to raw_spinlock
locking: Various static lock initializer fixes
lockdep: Print more info when MAX_LOCK_DEPTH is exceeded
rwsem: Implement writer lock-stealing for better scalability
lockdep: Silence warning if CONFIG_LOCKDEP isn't set
watchdog: Use local_clock for get_timestamp()
lockdep: Rename print_unlock_inbalance_bug() to print_unlock_imbalance_bug()
locking/stat: Fix a typo
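The seqlock rework mentioned in the pull message above ("seqlock: Use seqcount infrastructure") layers seqlock_t on top of a sequence counter plus a spinlock: writers serialize on the lock and leave the counter odd while they update the data, while readers never block and simply retry if the counter was odd or changed under them. Below is a minimal user-space sketch of that pattern only, assuming C11 atomics and a pthread mutex in place of the kernel's seqcount_t and spinlock_t; the function names mirror the kernel API but everything here is defined locally and is illustrative, not the kernel implementation.

/* seqlock_sketch.c - build with: cc -pthread seqlock_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct seqlock {
	atomic_uint	seq;	/* even: idle, odd: writer active */
	pthread_mutex_t	lock;	/* serializes writers only */
};

static struct seqlock sl = { 0, PTHREAD_MUTEX_INITIALIZER };
static long val_a, val_b;	/* data the seqlock protects */

static void write_seqlock(struct seqlock *s)
{
	pthread_mutex_lock(&s->lock);
	atomic_fetch_add(&s->seq, 1);	/* seq goes odd: write in progress */
}

static void write_sequnlock(struct seqlock *s)
{
	atomic_fetch_add(&s->seq, 1);	/* seq goes even: data consistent again */
	pthread_mutex_unlock(&s->lock);
}

static unsigned int read_seqbegin(struct seqlock *s)
{
	unsigned int v;

	do {
		v = atomic_load(&s->seq);
	} while (v & 1);		/* writer active, spin until even */
	return v;
}

static int read_seqretry(struct seqlock *s, unsigned int start)
{
	/* nonzero means a writer ran during the read section: retry */
	return atomic_load(&s->seq) != start;
}

int main(void)
{
	unsigned int seq;
	long a, b;

	write_seqlock(&sl);
	val_a = 1;
	val_b = 2;
	write_sequnlock(&sl);

	do {
		seq = read_seqbegin(&sl);
		a = val_a;	/* a real reader needs READ_ONCE-style accesses */
		b = val_b;
	} while (read_seqretry(&sl, seq));

	printf("a=%ld b=%ld\n", a, b);
	return 0;
}

The sequentially consistent atomics above are a conservative stand-in for the explicit barriers (smp_wmb/smp_rmb) and lockdep annotations the kernel's seqcount code uses.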
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/futex.c		 2
-rw-r--r--	kernel/futex_compat.c	 2
-rw-r--r--	kernel/lockdep.c	15
-rw-r--r--	kernel/time/ntp.c	26
-rw-r--r--	kernel/watchdog.c	10
5 files changed, 27 insertions, 28 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 9618b6e9fb36..fbc07a29ec53 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2472,8 +2472,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
 
-	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
-
 	rcu_read_lock();
 
 	ret = -ESRCH;
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 83e368b005fc..a9642d528630 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
 
-	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
-
 	rcu_read_lock();
 
 	ret = -ESRCH;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7981e5b2350d..8a0efac4f99d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3190,9 +3190,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 #endif
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
 		debug_locks_off();
-		printk("BUG: MAX_LOCK_DEPTH too low!\n");
+		printk("BUG: MAX_LOCK_DEPTH too low, depth: %i max: %lu!\n",
+		       curr->lockdep_depth, MAX_LOCK_DEPTH);
 		printk("turning off the locking correctness validator.\n");
+
+		lockdep_print_held_locks(current);
+		debug_show_all_locks();
 		dump_stack();
+
 		return 0;
 	}
 
@@ -3203,7 +3208,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 
 static int
-print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
+print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 			unsigned long ip)
 {
 	if (!debug_locks_off())
@@ -3246,7 +3251,7 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 		return 0;
 
 	if (curr->lockdep_depth <= 0)
-		return print_unlock_inbalance_bug(curr, lock, ip);
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
 	return 1;
 }
@@ -3317,7 +3322,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 			goto found_it;
 		prev_hlock = hlock;
 	}
-	return print_unlock_inbalance_bug(curr, lock, ip);
+	return print_unlock_imbalance_bug(curr, lock, ip);
 
 found_it:
 	lockdep_init_map(lock, name, key, 0);
@@ -3384,7 +3389,7 @@ lock_release_non_nested(struct task_struct *curr,
 			goto found_it;
 		prev_hlock = hlock;
 	}
-	return print_unlock_inbalance_bug(curr, lock, ip);
+	return print_unlock_imbalance_bug(curr, lock, ip);
 
 found_it:
 	if (hlock->instance == lock)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b10a42bb0165..072bb066bb7d 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -23,7 +23,7 @@
  * NTP timekeeping variables:
  */
 
-DEFINE_SPINLOCK(ntp_lock);
+DEFINE_RAW_SPINLOCK(ntp_lock);
 
 
 /* USER_HZ period (usecs): */
@@ -348,7 +348,7 @@ void ntp_clear(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	time_adjust = 0;		/* stop active adjtime() */
 	time_status |= STA_UNSYNC;
@@ -362,7 +362,7 @@ void ntp_clear(void)
 	/* Clear PPS state variables */
 	pps_clear();
 
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 
 }
 
@@ -372,9 +372,9 @@ u64 ntp_tick_length(void)
 	unsigned long flags;
 	s64 ret;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 	ret = tick_length;
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 
 	return ret;
 }
@@ -395,7 +395,7 @@ int second_overflow(unsigned long secs)
 	int leap = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	/*
 	 * Leap second processing. If in leap-insert state at the end of the
@@ -479,7 +479,7 @@ int second_overflow(unsigned long secs)
 		time_adjust = 0;
 
 out:
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 
 	return leap;
 }
@@ -672,7 +672,7 @@ int do_adjtimex(struct timex *txc)
 
 	getnstimeofday(&ts);
 
-	spin_lock_irq(&ntp_lock);
+	raw_spin_lock_irq(&ntp_lock);
 
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
@@ -714,7 +714,7 @@ int do_adjtimex(struct timex *txc)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	spin_unlock_irq(&ntp_lock);
+	raw_spin_unlock_irq(&ntp_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
 	txc->time.tv_usec = ts.tv_nsec;
@@ -912,7 +912,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	pts_norm = pps_normalize_ts(*phase_ts);
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	/* clear the error bits, they will be set again if needed */
 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -925,7 +925,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	 * just start the frequency interval */
 	if (unlikely(pps_fbase.tv_sec == 0)) {
 		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
 		return;
 	}
 
@@ -940,7 +940,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
 		pr_err("hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}
@@ -957,7 +957,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	hardpps_update_phase(pts_norm.nsec);
 
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 27689422aa92..4a944676358e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -113,9 +113,9 @@ static int get_softlockup_thresh(void)
  * resolution, and we don't need to waste time with a big divide when
  * 2^30ns == 1.074s.
  */
-static unsigned long get_timestamp(int this_cpu)
+static unsigned long get_timestamp(void)
 {
-	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
+	return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
 }
 
 static void set_sample_period(void)
@@ -133,9 +133,7 @@ static void set_sample_period(void)
 /* Commands for resetting the watchdog */
 static void __touch_watchdog(void)
 {
-	int this_cpu = smp_processor_id();
-
-	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
+	__this_cpu_write(watchdog_touch_ts, get_timestamp());
 }
 
 void touch_softlockup_watchdog(void)
@@ -196,7 +194,7 @@ static int is_hardlockup(void)
 
 static int is_softlockup(unsigned long touch_ts)
 {
-	unsigned long now = get_timestamp(smp_processor_id());
+	unsigned long now = get_timestamp();
 
 	/* Warn about unreasonable delays: */
 	if (time_after(now, touch_ts + get_softlockup_thresh()))
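A note on the watchdog hunks above: get_timestamp() now reads the current CPU's clock via local_clock(), so callers no longer need to pass smp_processor_id(), and the right shift by 30 keeps the nanoseconds-to-seconds conversion cheap, since 2^30 ns is roughly 1.074 s. The following is a user-space approximation of the same trick, not kernel code; CLOCK_MONOTONIC stands in here for local_clock().

/* timestamp_sketch.c - build with: cc timestamp_sketch.c */
#include <stdio.h>
#include <time.h>

/* Coarse "seconds" timestamp: shift a nanosecond clock right by 30
 * instead of dividing by 10^9 (2^30 ns ~= 1.074 s).  Good enough for
 * watchdog-style "has N seconds passed?" checks and cheaper than a
 * 64-bit divide. */
static unsigned long get_timestamp(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)(((unsigned long long)ts.tv_sec * 1000000000ULL
				+ (unsigned long long)ts.tv_nsec) >> 30);
}

int main(void)
{
	printf("coarse uptime: ~%lu s\n", get_timestamp());
	return 0;
}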