author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>       2007-05-08 00:34:47 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 11:15:20 -0700
commit     7232311ef14c274d88871212a07557f18f4140d1 (patch)
tree       46027210d51dee5fd5086f159d98bac3535a005d /include/asm-mips/local.h
parent     4431f46f5fe0e3b740dfaf09ba34f0b14688185e (diff)
local_t: mips extension
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-mips/local.h')
-rw-r--r--  include/asm-mips/local.h  304
1 file changed, 266 insertions(+), 38 deletions(-)
diff --git a/include/asm-mips/local.h b/include/asm-mips/local.h
index 9e2d43bae388..ed882c88e0ca 100644
--- a/include/asm-mips/local.h
+++ b/include/asm-mips/local.h
@@ -1,60 +1,288 @@
-#ifndef _ASM_LOCAL_H
-#define _ASM_LOCAL_H
+#ifndef _ARCH_MIPS_LOCAL_H
+#define _ARCH_MIPS_LOCAL_H
 
 #include <linux/percpu.h>
+#include <linux/bitops.h>
 #include <asm/atomic.h>
+#include <asm/war.h>
 
-#ifdef CONFIG_32BIT
+typedef struct
+{
+	atomic_long_t a;
+} local_t;
 
-typedef atomic_t local_t;
+#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
-#define LOCAL_INIT(i)	ATOMIC_INIT(i)
-#define local_read(v)	atomic_read(v)
-#define local_set(v,i)	atomic_set(v,i)
+#define local_read(l)	atomic_long_read(&(l)->a)
+#define local_set(l,i)	atomic_long_set(&(l)->a, (i))
 
-#define local_inc(v)	atomic_inc(v)
-#define local_dec(v)	atomic_dec(v)
-#define local_add(i, v)	atomic_add(i, v)
-#define local_sub(i, v)	atomic_sub(i, v)
+#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
+#define local_inc(l)	atomic_long_inc(&(l)->a)
+#define local_dec(l)	atomic_long_dec(&(l)->a)
 
-#endif
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long local_add_return(long i, local_t * l)
+{
+	unsigned long result;
+
+	if (cpu_has_llsc && R10000_LLSC_WAR) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_add_return	\n"
+		"	addu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+		"	addu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else if (cpu_has_llsc) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_add_return	\n"
+		"	addu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqz	%0, 1b					\n"
+		"	addu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else {
+		unsigned long flags;
 
-#ifdef CONFIG_64BIT
+		local_irq_save(flags);
+		result = l->a.counter;
+		result += i;
+		l->a.counter = result;
+		local_irq_restore(flags);
+	}
 
-typedef atomic64_t local_t;
+	return result;
+}
 
-#define LOCAL_INIT(i)	ATOMIC64_INIT(i)
-#define local_read(v)	atomic64_read(v)
-#define local_set(v,i)	atomic64_set(v,i)
+static __inline__ long local_sub_return(long i, local_t * l)
+{
+	unsigned long result;
 
-#define local_inc(v)	atomic64_inc(v)
-#define local_dec(v)	atomic64_dec(v)
-#define local_add(i, v)	atomic64_add(i, v)
-#define local_sub(i, v)	atomic64_sub(i, v)
+	if (cpu_has_llsc && R10000_LLSC_WAR) {
+		unsigned long temp;
 
-#endif
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_sub_return	\n"
+		"	subu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else if (cpu_has_llsc) {
+		unsigned long temp;
 
-#define __local_inc(v)		((v)->counter++)
-#define __local_dec(v)		((v)->counter--)
-#define __local_add(i,v)	((v)->counter+=(i))
-#define __local_sub(i,v)	((v)->counter-=(i))
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_sub_return	\n"
+		"	subu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqz	%0, 1b					\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		result = l->a.counter;
+		result -= i;
+		l->a.counter = result;
+		local_irq_restore(flags);
+	}
+
+	return result;
+}
 
 /*
- * Use these for per-cpu local_t variables: on some archs they are
+ * local_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically test @l and subtract @i if @l is greater than or equal to @i.
+ * The function returns the old value of @l minus @i.
+ */
+static __inline__ long local_sub_if_positive(long i, local_t * l)
+{
+	long result;
+
+	if (cpu_has_llsc && R10000_LLSC_WAR) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_sub_if_positive\n"
+		"	dsubu	%0, %1, %3				\n"
+		"	bltz	%0, 1f					\n"
+			__SC	"%0, %2					\n"
+		"	.set	noreorder				\n"
+		"	beqzl	%0, 1b					\n"
+		"	 dsubu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
+		"1:							\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else if (cpu_has_llsc) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_sub_if_positive\n"
+		"	dsubu	%0, %1, %3				\n"
+		"	bltz	%0, 1f					\n"
+			__SC	"%0, %2					\n"
+		"	.set	noreorder				\n"
+		"	beqz	%0, 1b					\n"
+		"	 dsubu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
+		"1:							\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		result = l->a.counter;
+		result -= i;
+		if (result >= 0)
+			l->a.counter = result;
+		local_irq_restore(flags);
+	}
+
+	return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u)				\
+({								\
+	long c, old;						\
+	c = local_read(l);					\
+	while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1,(l))
+#define local_inc_return(l) local_add_return(1,(l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_dec_if_positive - decrement by 1 if old value positive
+ * @l: pointer of type local_t
+ */
+#define local_dec_if_positive(l)	local_sub_if_positive(1, l)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i,l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
  * much more efficient than these naive implementations.  Note they take
  * a variable, not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+#define __local_inc(l)		((l)->a.counter++)
+#define __local_dec(l)		((l)->a.counter--)
+#define __local_add(i,l)	((l)->a.counter+=(i))
+#define __local_sub(i,l)	((l)->a.counter-=(i))
+
+/* Need to disable preemption for the cpu local counters; otherwise we could
+   still access a variable of a previous CPU in a non-atomic way. */
+#define cpu_local_wrap_v(l)		\
+	({ local_t res__;		\
+	   preempt_disable();		\
+	   res__ = (l);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(l)		\
+	({ preempt_disable();		\
+	   l;				\
+	   preempt_enable(); })
+
+#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
 
-#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l)	cpu_local_inc(l)
+#define __cpu_local_dec(l)	cpu_local_dec(l)
+#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
 
-#endif /* _ASM_LOCAL_H */
+#endif /* _ARCH_MIPS_LOCAL_H */
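For context, the usage pattern this API targets (compare Documentation/local_ops.txt) is a per-CPU counter that only its owning CPU ever modifies, so the update never has to be ordered against other CPUs. The sketch below is illustrative only and not part of the patch; the names evt_count, count_event() and event_total() are made up for this example.

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <asm/local.h>

	static DEFINE_PER_CPU(local_t, evt_count) = LOCAL_INIT(0);

	/* Fast path, on the local CPU: cpu_local_inc() wraps the update in
	 * preempt_disable()/preempt_enable(), so the task cannot migrate
	 * mid-update and touch another CPU's counter. */
	static void count_event(void)
	{
		cpu_local_inc(evt_count);
	}

	/* Slow path: sum all per-CPU counters.  The total is approximate
	 * while other CPUs are still incrementing their own copies. */
	static long event_total(void)
	{
		long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += local_read(&per_cpu(evt_count, cpu));

		return sum;
	}

Note that the *_return operations in this header omit the SMP memory barriers that the corresponding atomic_*_return operations on MIPS must issue; together with the single-owner rule above, that is what makes local_t updates cheaper than a shared atomic_long_t.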