| author | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:48 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:48 -0400 |
| commit | 9e804d1f58da1eca079f796347c1cf1d1df564e2 (patch) | |
| tree | 5fc958822d1d44509a9003be88490ede36ae12f9 /include | |
| parent | eecc16ba9a49b05dd847a317af166a6728eb56ca (diff) | |
percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
percpu_ref will be restructured so that percpu/atomic mode switching
and reference killing are decoupled. In preparation, do the following
renames.
* percpu_ref->confirm_kill -> percpu_ref->confirm_switch
* __PERCPU_REF_DEAD -> __PERCPU_REF_ATOMIC
* __percpu_ref_alive() -> __ref_is_percpu()
This patch is a pure rename and doesn't introduce any functional
changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
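A note on the bit-stuffing the new flag relies on: percpu_count_ptr holds a pointer to the percpu counter, and because that allocation is at least word-aligned, bit 0 of a live pointer is always zero and is free to carry the mode flag. Below is a minimal userspace sketch of the tagging scheme, assuming plain calloc'd memory in place of a real percpu area; only the flag name mirrors the patch, everything else is illustrative:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define REF_ATOMIC (1UL << 0)	/* mirrors __PERCPU_REF_ATOMIC */

int main(void)
{
	/* calloc() memory is at least word-aligned, so bit 0 starts out clear */
	unsigned long *count = calloc(1, sizeof(*count));
	uintptr_t tagged = (uintptr_t)count;

	printf("atomic mode? %d\n", !!(tagged & REF_ATOMIC));	/* 0: percpu mode */

	tagged |= REF_ATOMIC;		/* flip to atomic mode; pointer bits intact */
	printf("atomic mode? %d\n", !!(tagged & REF_ATOMIC));	/* 1: atomic mode */

	/* mask the flag off to recover a dereferenceable pointer */
	unsigned long *recovered = (unsigned long *)(tagged & ~REF_ATOMIC);
	*recovered = 42;
	printf("count = %lu\n", *count);	/* 42 */

	free(count);
	return 0;
}
```

The kernel side never needs the mask-off step in the hot path: as the diff below shows, __ref_is_percpu() bails out before touching the pointer whenever the flag is set, and uses the pointer as-is (flag known clear) otherwise.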
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/percpu-refcount.h | 25 |
|---|---|---|
1 file changed, 14 insertions(+), 11 deletions(-)
```diff
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3d463a39e0f7..910e5f72055d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -54,6 +54,11 @@ struct percpu_ref;
 
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+};
+
 struct percpu_ref {
 	atomic_long_t		count;
 	/*
@@ -62,7 +67,7 @@ struct percpu_ref {
 	 */
 	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
 	struct rcu_head		rcu;
 };
 
@@ -88,23 +93,21 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define __PERCPU_REF_DEAD	1
-
 /*
  * Internal helper.  Don't use outside percpu-refcount proper.  The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
  * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __percpu_ref_alive(struct percpu_ref *ref,
-				      unsigned long __percpu **percpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
 	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
@@ -125,7 +128,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_inc(*percpu_count);
 	else
 		atomic_long_inc(&ref->count);
@@ -149,7 +152,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
@@ -183,7 +186,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	}
@@ -208,7 +211,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_dec(*percpu_count);
 	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
@@ -228,7 +231,7 @@ static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
 
 	return !atomic_long_read(&ref->count);
 }
```
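The comment kept in the hunk above explains the slightly unusual out-parameter: if __ref_is_percpu() returned the pointer and callers tested it for NULL, the compiler would have to emit two conditional branches. Here is a hedged userspace analogue of how percpu_ref_get() consumes that result, with C11 atomics standing in for atomic_long_t and an ordinary variable standing in for the percpu area; the kernel additionally holds rcu_read_lock_sched() around the whole sequence, which this sketch omits, and all names here are illustrative:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ref {
	atomic_long count;		/* slow path: shared atomic counter */
	uintptr_t percpu_count_ptr;	/* tagged pointer, bit 0 = atomic mode */
};

/* analogue of __ref_is_percpu(): one test, fills *countp on the fast path */
static bool ref_is_percpu(struct ref *ref, unsigned long **countp)
{
	uintptr_t ptr = ref->percpu_count_ptr;

	if (ptr & 1UL)			/* __PERCPU_REF_ATOMIC analogue */
		return false;
	*countp = (unsigned long *)ptr;
	return true;
}

/* analogue of percpu_ref_get(): branch once, then the cheapest increment */
static void ref_get(struct ref *ref)
{
	unsigned long *count;

	if (ref_is_percpu(ref, &count))
		++*count;			/* this_cpu_inc() stand-in */
	else
		atomic_fetch_add(&ref->count, 1);
}

int main(void)
{
	static unsigned long local;	/* stand-in for the percpu area */
	struct ref r = { .percpu_count_ptr = (uintptr_t)&local };

	ref_get(&r);			/* percpu mode: bumps local */
	r.percpu_count_ptr |= 1UL;	/* switch the ref to atomic mode */
	ref_get(&r);			/* atomic mode: bumps r.count */

	printf("local=%lu shared=%ld\n", local, atomic_load(&r.count));
	return 0;
}
```

The payoff of the rename is visible in this shape: the helper answers "which mode am I in?", not "am I dead?", which is exactly the distinction the follow-up decoupling patches need.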