| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-21 12:12:01 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-21 12:12:01 -0800 |
| commit | 9eef02334505411667a7b51a8f349f8c6c4f3b66 | |
| tree | 3a0c8fb85d76595b2f9468d3e31f41147a43ed55 /kernel/kcsan/core.c | |
| parent | d089f48fba28db14d0fe7753248f2575a9ddfc73 | |
| parent | 3765d01bab73bdb920ef711203978f02cd26e4da | |
Merge tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Core locking primitives updates:
- Remove mutex_trylock_recursive() from the API - no users left
- Simplify + constify the futex code a bit
Lockdep updates:
- Teach lockdep about local_lock_t
- Add CONFIG_DEBUG_IRQFLAGS=y debug config option to check for
potentially unsafe IRQ mask restoration patterns. (I.e.
calling raw_local_irq_restore() with IRQs enabled.)
- Add wait context self-tests
- Fix graph lock corner case corrupting internal data structures
- Fix noinstr annotations
LKMM updates:
- Simplify the litmus tests
- Documentation fixes
KCSAN updates:
- Re-enable KCSAN instrumentation in lib/random32.c
Misc fixes:
- Don't branch-trace static label APIs
- DocBook fix
- Remove stale leftover empty file"
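For context on the CONFIG_DEBUG_IRQFLAGS item above, here is a minimal hypothetical sketch of the pattern it is meant to catch: raw_local_irq_restore() reached while IRQs are already enabled. The function and its body are illustrative only and not taken from any patch in this series.

```c
/*
 * Hypothetical example (never called; for illustration only) of an
 * unbalanced IRQ-flags sequence. With CONFIG_DEBUG_IRQFLAGS=y, the final
 * raw_local_irq_restore() is reached with IRQs already enabled and the
 * kernel reports it via warn_bogus_irq_restore().
 */
#include <linux/irqflags.h>

static void example_unbalanced_irq_restore(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* IRQs off, previous state saved in flags */

	/* ... critical section ... */

	raw_local_irq_enable();		/* bug: unconditionally re-enables IRQs */

	raw_local_irq_restore(flags);	/* flagged: IRQs are already on here */
}
```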
* tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
checkpatch: Don't check for mutex_trylock_recursive()
locking/mutex: Kill mutex_trylock_recursive()
s390: Use arch_local_irq_{save,restore}() in early boot code
lockdep: Noinstr annotate warn_bogus_irq_restore()
locking/lockdep: Avoid unmatched unlock
locking/rwsem: Remove empty rwsem.h
locking/rtmutex: Add missing kernel-doc markup
futex: Remove unneeded gotos
futex: Change utime parameter to be 'const ... *'
lockdep: report broken irq restoration
jump_label: Do not profile branch annotations
locking: Add Reviewers
locking/selftests: Add local_lock inversion tests
locking/lockdep: Exclude local_lock_t from IRQ inversions
locking/lockdep: Clean up check_redundant() a bit
locking/lockdep: Add a skip() function to __bfs()
locking/lockdep: Mark local_lock_t
locking/selftests: More granular debug_locks_verbose
lockdep/selftest: Add wait context selftests
tools/memory-model: Fix typo in klitmus7 compatibility table
...
Diffstat (limited to 'kernel/kcsan/core.c')
| | | |
|---|---|---|
| -rw-r--r-- | kernel/kcsan/core.c | 26 |
1 files changed, 13 insertions, 13 deletions
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3994a217bde7..3bf98db9c702 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -12,7 +12,6 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
-#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
 static DEFINE_PER_CPU(long, kcsan_skip);
 
 /* For kcsan_prandom_u32_max(). */
-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+static DEFINE_PER_CPU(u32, kcsan_rand_state);
 
 static __always_inline
 atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
 }
 
 /*
- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
- * for more details.
- *
- * The open-coded version here is using only safe primitives for all contexts
- * where we can have KCSAN instrumentation. In particular, we cannot use
- * prandom_u32() directly, as its tracepoint could cause recursion.
+ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
+ * congruential generator, using constants from "Numerical Recipes".
  */
 static u32 kcsan_prandom_u32_max(u32 ep_ro)
 {
-	struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
-	const u32 res = prandom_u32_state(state);
+	u32 state = this_cpu_read(kcsan_rand_state);
+
+	state = 1664525 * state + 1013904223;
+	this_cpu_write(kcsan_rand_state, state);
 
-	put_cpu_var(kcsan_rand_state);
-	return (u32)(((u64) res * ep_ro) >> 32);
+	return state % ep_ro;
 }
 
 static inline void reset_kcsan_skip(void)
@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 
 void __init kcsan_init(void)
 {
+	int cpu;
+
 	BUG_ON(!in_task());
 
 	kcsan_debugfs_init();
-	prandom_seed_full_state(&kcsan_rand_state);
+
+	for_each_possible_cpu(cpu)
+		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
 
 	/*
 	 * We are in the init task, and no other tasks should be running;
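The kcsan_prandom_u32_max() change is easier to read as a stand-alone sketch. The following is plain user-space C, not kernel code; the fixed seed and the bound of 4000 are arbitrary stand-ins for the per-CPU get_cycles() seeding and the callers' real bounds. It only illustrates the LCG step and the modulo reduction the patch switches to.

```c
/*
 * Stand-alone user-space sketch of the PRNG the patch switches KCSAN to:
 * one 32-bit word of state, advanced by a linear congruential generator
 * with the "Numerical Recipes" constants, then reduced into [0, ep_ro)
 * with a modulo.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t state = 12345;	/* stand-in for the per-CPU seed from get_cycles() */

static uint32_t sketch_prandom_u32_max(uint32_t ep_ro)
{
	state = 1664525u * state + 1013904223u;	/* LCG step, wraps mod 2^32 */
	return state % ep_ro;			/* map into [0, ep_ro) */
}

int main(void)
{
	/* draw a few values, roughly the way reset_kcsan_skip() picks skip counts */
	for (int i = 0; i < 5; i++)
		printf("%" PRIu32 "\n", sketch_prandom_u32_max(4000));
	return 0;
}
```

One visible trade-off in the diff: the removed code mapped into range with `((u64)res * ep_ro) >> 32`, which avoids the division implied by `%`, while the replacement accepts a plain modulo; for KCSAN's skip-count and delay randomization, rough uniformity is evidently considered good enough.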
