percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
percpu_ref will be restructured so that percpu/atomic mode switching and reference killing are decoupled. In preparation, do the following renames.

* percpu_ref->confirm_kill	-> percpu_ref->confirm_switch
* __PERCPU_REF_DEAD		-> __PERCPU_REF_ATOMIC
* __percpu_ref_alive()		-> __ref_is_percpu()

This patch is pure rename and doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
commit 9e804d1f58
parent eecc16ba9a
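For context, a minimal sketch (not part of this commit) of how a percpu_ref user exercises the fields being renamed. The names my_obj, my_release and my_confirm are hypothetical, and percpu_ref_init() is assumed to take the release-callback-plus-gfp signature the tree carried around this series:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* Invoked when the last reference is dropped. */
static void my_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

/*
 * Invoked once all CPUs agree the ref is dead; after this patch the
 * callback is stored in ->confirm_switch rather than ->confirm_kill.
 */
static void my_confirm(struct percpu_ref *ref)
{
	/* e.g. complete() a completion that shutdown waits on */
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj || percpu_ref_init(&obj->ref, my_release, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

static void my_obj_shutdown(struct my_obj *obj)
{
	/*
	 * Sets __PERCPU_REF_ATOMIC (formerly __PERCPU_REF_DEAD) and, after
	 * an RCU grace period, fires my_confirm from percpu_ref_kill_rcu().
	 */
	percpu_ref_kill_and_confirm(&obj->ref, my_confirm);
}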
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
@@ -54,6 +54,11 @@
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+};
+
 struct percpu_ref {
 	atomic_long_t		count;
 	/*
@@ -62,7 +67,7 @@ struct percpu_ref {
 	 */
 	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
 	struct rcu_head		rcu;
 };
 
@@ -88,23 +93,21 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define __PERCPU_REF_DEAD	1
-
 /*
  * Internal helper.  Don't use outside percpu-refcount proper.  The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
  * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __percpu_ref_alive(struct percpu_ref *ref,
-				      unsigned long __percpu **percpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
 	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
@@ -125,7 +128,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_inc(*percpu_count);
 	else
 		atomic_long_inc(&ref->count);
@@ -149,7 +152,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
@@ -183,7 +186,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	}
@@ -208,7 +211,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_dec(*percpu_count);
 	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
@@ -228,7 +231,7 @@ static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
 	return !atomic_long_read(&ref->count);
 }
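The renamed flag lives in the low bit of ->percpu_count_ptr: it is tested by __ref_is_percpu() above and masked off by percpu_count_ptr() below. A toy user-space model of that pointer-tagging trick (plain C, not kernel code; names are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define REF_ATOMIC (1UL << 0)	/* stand-in for __PERCPU_REF_ATOMIC */

int main(void)
{
	unsigned long counter = 0;
	/*
	 * An unsigned long is aligned, so bit 0 of its address is
	 * guaranteed to be zero and is free to carry a mode flag.
	 */
	uintptr_t tagged = (uintptr_t)&counter;

	tagged |= REF_ATOMIC;	/* what percpu_ref_kill_and_confirm() does */
	printf("atomic mode? %s\n", (tagged & REF_ATOMIC) ? "yes" : "no");

	/* what percpu_count_ptr() does: mask the flag off to recover
	 * the real pointer */
	unsigned long *count = (unsigned long *)(tagged & ~REF_ATOMIC);
	*count = 42;
	printf("count = %lu\n", *count);	/* prints 42 */
	return 0;
}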
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
@@ -34,7 +34,7 @@
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
 	return (unsigned long __percpu *)
-		(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
 /**
@@ -80,7 +80,7 @@ void percpu_ref_exit(struct percpu_ref *ref)
 
 	if (percpu_count) {
 		free_percpu(percpu_count);
-		ref->percpu_count_ptr = __PERCPU_REF_DEAD;
+		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC;
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -117,8 +117,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 		  ref->release, atomic_long_read(&ref->count));
 
 	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
-	if (ref->confirm_kill)
-		ref->confirm_kill(ref);
+	if (ref->confirm_switch)
+		ref->confirm_switch(ref);
 
 	/*
 	 * Now we're in single atomic_long_t mode with a consistent
@@ -145,11 +145,11 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
-	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
-	ref->confirm_kill = confirm_kill;
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+	ref->confirm_switch = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
@@ -178,14 +178,14 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 
 	/*
 	 * Restore per-cpu operation.  smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __percpu_ref_alive() and
-	 * guarantees that the zeroing is visible to all percpu accesses
-	 * which can see the following __PERCPU_REF_DEAD clearing.
+	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following __PERCPU_REF_ATOMIC clearing.
	 */
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(percpu_count, cpu) = 0;
 
 	smp_store_release(&ref->percpu_count_ptr,
-			  ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
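The comment rewritten in percpu_ref_reinit() describes a release/dependent-load pairing: the per-cpu counters must be zeroed before any reader can observe the cleared __PERCPU_REF_ATOMIC bit. A toy user-space model of the same ordering, using C11 release/consume in place of smp_store_release()/smp_read_barrier_depends() (illustrative only; compile with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MODE_ATOMIC 1UL			/* stand-in for __PERCPU_REF_ATOMIC */

static unsigned long counter = 42;	/* stale value to be zeroed */
static _Atomic unsigned long tagged;	/* stand-in for percpu_count_ptr */

static void *reinit_side(void *arg)
{
	counter = 0;			/* zero the counter first... */
	/* ...then clear the mode bit with release semantics, as
	 * smp_store_release() does in percpu_ref_reinit(). */
	atomic_store_explicit(&tagged, (unsigned long)&counter,
			      memory_order_release);
	return NULL;
}

static void *get_side(void *arg)
{
	/* Consume ordering stands in for smp_read_barrier_depends():
	 * a reader that sees the cleared bit also sees the zeroing. */
	unsigned long p = atomic_load_explicit(&tagged, memory_order_consume);

	if (p & MODE_ATOMIC)
		puts("atomic mode");
	else
		printf("percpu mode, count = %lu\n", *(unsigned long *)p);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_init(&tagged, MODE_ATOMIC);	/* start in atomic mode */
	pthread_create(&a, NULL, reinit_side, NULL);
	pthread_create(&b, NULL, get_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}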