Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
closures: closure_get_not_zero(), closure_return_sync()
Provide new primitives for solving a lifetime issue with bcachefs
btree_trans objects.

closure_return_sync(): like closure_sync(), wait synchronously for any
outstanding gets. Like closure_return(), the closure is considered
"finished" and the ref is left at 0.

closure_get_not_zero(): get a ref on a closure if it's alive, i.e. the
ref is not zero.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent 18e92841e8
commit 06efa5f30c
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -284,6 +284,21 @@ static inline void closure_get(struct closure *cl)
 #endif
 }
 
+/**
+ * closure_get_not_zero - get a ref on a closure if it's alive, i.e. ref nonzero
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+	unsigned old = atomic_read(&cl->remaining);
+	do {
+		if (!(old & CLOSURE_REMAINING_MASK))
+			return false;
+
+	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+	return true;
+}
+
 /**
  * closure_init - Initialize a closure, setting the refcount to 1
  * @cl: closure to initialize
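For orientation, here is a minimal sketch of how the new try-get primitive might be used. The struct and function names (my_obj, my_obj_tryget, my_obj_put) are hypothetical illustrations, not part of this commit:

/* Hypothetical object whose lifetime is governed by an embedded closure: */
struct my_obj {
	struct closure	cl;	/* lifetime ref for this object */
	/* ... object state ... */
};

/* Succeeds only while the closure is alive (ref nonzero), i.e. before
 * teardown has dropped the last ref: */
static bool my_obj_tryget(struct my_obj *obj)
{
	return closure_get_not_zero(&obj->cl);
}

static void my_obj_put(struct my_obj *obj)
{
	closure_put(&obj->cl);
}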
@@ -310,6 +325,12 @@ static inline void closure_init_stack(struct closure *cl)
 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 }
 
+static inline void closure_init_stack_release(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
 /**
  * closure_wake_up - wake up all closures on a wait list,
  * with memory barrier
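The _release variant plausibly pairs with the acquire ordering in closure_get_not_zero(): stores that initialize the object before the ref is published should be visible to any CPU that subsequently wins the try-get. A hedged sketch, reusing the hypothetical my_obj from above:

/* Initialize object state with plain stores first, then publish the ref
 * with release semantics; a concurrent closure_get_not_zero() that
 * observes the nonzero ref (acquire) also observes the initialized
 * fields. */
static void my_obj_init(struct my_obj *obj)
{
	/* ... plain initialization of object state ... */
	closure_init_stack_release(&obj->cl);
}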
@@ -355,6 +376,8 @@ do { \
  */
 #define closure_return(_cl)	continue_at((_cl), NULL, NULL)
 
+void closure_return_sync(struct closure *cl);
+
 /**
  * continue_at_nobarrier - jump to another function without barrier
  *
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,7 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
 
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 		  flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
 		r &= ~CLOSURE_GUARD_MASK;
 
-	if (!r) {
-		smp_acquire__after_ctrl_dep();
-
-		WARN(flags & ~CLOSURE_DESTRUCTOR,
-		     "closure ref hit 0 with incorrect flags set: %x (%u)",
-		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+	     "closure ref hit 0 with incorrect flags set: %x (%u)",
+	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+	closure_put_after_sub_checks(flags);
+
+	if (!(flags & CLOSURE_REMAINING_MASK)) {
+		smp_acquire__after_ctrl_dep();
 
 		cl->closure_get_happened = false;
 
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(__closure_sync);
 
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+	struct closure_syncer s = { .task = current };
+
+	cl->s = &s;
+	set_closure_fn(cl, closure_sync_fn, NULL);
+
+	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+						   &cl->remaining);
+
+	closure_put_after_sub_checks(flags);
+
+	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+		while (1) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (s.done)
+				break;
+			schedule();
+		}
+
+		__set_current_state(TASK_RUNNING);
+	}
+
+	if (cl->parent)
+		closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
 int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
 {
 	struct closure_syncer s = { .task = current };
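Putting both primitives together, a sketch of the lifetime pattern the commit message describes for bcachefs btree_trans objects (the my_obj teardown shown here is hypothetical, continuing the earlier sketches): closure_return_sync() waits out in-flight users and leaves the ref at 0, so later try-gets fail instead of resurrecting an object that is about to be freed.

static void my_obj_exit(struct my_obj *obj)
{
	/*
	 * Waits for outstanding my_obj_tryget() refs to be dropped, then
	 * returns with the ref left at 0; from here on
	 * closure_get_not_zero(&obj->cl) fails, so no new users can show
	 * up while the object is freed.
	 */
	closure_return_sync(&obj->cl);

	kfree(obj);
}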