author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-14 11:46:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-14 11:46:25 -0700
commit		b6daa51b9a6a02a644dcf6b880fd50c1f70ec07f (patch)
tree		745ebeefb8f4225460774ef0037ca2f9022d4a4a /arch/x86
parent		f96ed2612260a8a415512eed4fe3f5c77247d4a1 (diff)
parent		9b7396624a7b503220d85428654634b60762f2b0 (diff)
Merge branch 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:

 - Nick improved the generic implementations of percpu operations that
   modify the variable and return a value, so that they calculate the
   percpu address only once.

 - percpu_ref percpu <-> atomic mode switching improvements. The
   patchset was originally posted about a year ago but fell through the
   cracks.

 - misc non-critical fixes.

* 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  mm/percpu.c: fix potential memory leakage for pcpu_embed_first_chunk()
  mm/percpu.c: correct max_distance calculation for pcpu_embed_first_chunk()
  percpu: eliminate two sparse warnings
  percpu: improve generic percpu modify-return implementation
  percpu-refcount: init ->confirm_switch member properly
  percpu_ref: allow operation mode switching operations to be called concurrently
  percpu_ref: restructure operation mode switching
  percpu_ref: unify staggered atomic switching wait behavior
  percpu_ref: reorganize __percpu_ref_switch_to_atomic() and relocate percpu_ref_switch_to_atomic()
  percpu_ref: remove unnecessary RCU grace period for staggered atomic switching confirmation
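As background for the "improve generic percpu modify-return implementation"
entry above: the older generic helpers performed the modification and then
re-read the variable, which evaluated the percpu pointer twice. The sketch
below shows the single-address-calculation pattern in the spirit of
include/asm-generic/percpu.h; the sketch_* names are illustrative and not
the exact merged macros.

	/* Before: two raw_cpu_ptr() evaluations per add-return. */
	#define sketch_add_return_old(pcp, val)				\
	({								\
		raw_cpu_add(pcp, val);	/* 1st address calculation */	\
		raw_cpu_read(pcp);	/* 2nd address calculation */	\
	})

	/*
	 * After: compute the percpu address once, then modify and
	 * return through the same pointer.
	 */
	#define sketch_add_return_new(pcp, val)				\
	({								\
		typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));		\
									\
		*__p += val;						\
		*__p;							\
	})

Callers of the raw_cpu_* variants are expected to run with preemption
disabled, which is what makes the single-pointer form safe: the task
cannot migrate to another CPU between the add and the read-back.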
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/percpu.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e02e3f80d363..84f58de08c2b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -521,7 +521,8 @@ do { \
 static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                         const unsigned long __percpu *addr)
 {
-	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+	unsigned long __percpu *a =
+		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;
 
 #ifdef CONFIG_X86_64
 	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
@@ -538,7 +539,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr,
 	asm volatile("bt "__percpu_arg(2)",%1\n\t"
 			CC_SET(c)
 			: CC_OUT(c) (oldbit)
-			: "m" (*(unsigned long *)addr), "Ir" (nr));
+			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
 
 	return oldbit;
 }
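Both hunks fix the "eliminate two sparse warnings" item from the shortlog:
casting a __percpu pointer through a plain "unsigned long *" strips the
sparse address-space annotation, so sparse warns about an implicit cast
between address spaces; keeping __percpu in the cast preserves it. A
minimal standalone sketch of the mechanism, assuming the __CHECKER__-only
definition of __percpu used by kernels of this vintage; the helper name
sketch_word_of() is illustrative and not part of the patch:

	/*
	 * Under sparse (__CHECKER__), __percpu places a pointer in a
	 * separate address space; for normal compiles it expands to
	 * nothing, so generated code is unchanged.
	 */
	#ifdef __CHECKER__
	# define __percpu	__attribute__((noderef, address_space(3)))
	#else
	# define __percpu
	#endif

	static inline unsigned long __percpu *
	sketch_word_of(const unsigned long __percpu *addr, unsigned int nr)
	{
		/*
		 * Casting through plain "unsigned long *" (as the removed
		 * lines did) would drop the address-space attribute and
		 * trigger a sparse warning; keeping __percpu in the cast,
		 * as the hunks above do, keeps sparse quiet.
		 */
		return (unsigned long __percpu *)addr + nr / BITS_PER_LONG;
	}

Since __percpu is empty outside checker builds, the patch is purely an
annotation fix: the object code for both functions is identical before
and after.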