Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-28 16:52:18 +00:00)
bcachefs: Use try_cmpxchg() family of functions instead of cmpxchg()
Use the try_cmpxchg() family of functions instead of cmpxchg(*ptr, old, new) == old. The x86 CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after cmpxchg (and the related move instruction in front of cmpxchg). Also, try_cmpxchg() implicitly assigns the old *ptr value to "old" when cmpxchg fails; there is no need to re-read the value in the loop.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Brian Foster <bfoster@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 68573b936d
parent e76a2b65b0
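For readers not familiar with the pattern, here is a minimal userspace sketch of the loop shape this commit converts to. It is an illustration only, not code from this repository: it uses C11 atomics instead of the kernel primitives, and the flags variable and set_need_write() helper are hypothetical. C11's atomic_compare_exchange_weak() behaves like the kernel's try_cmpxchg(): it returns false when the exchange fails and writes the value it actually observed back into "old", so the loop body never has to re-read the atomic variable.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long flags;

/* Hypothetical helper: set one bit in "flags" with a compare-exchange retry loop. */
static void set_need_write(unsigned bit)
{
        unsigned long old, new;

        old = atomic_load(&flags);               /* analogous to READ_ONCE() */
        do {
                new = old | (1UL << bit);        /* compute the update from the observed value */
        } while (!atomic_compare_exchange_weak(&flags, &old, new));
        /* On failure, "old" already holds the latest value and the loop simply retries. */
}

int main(void)
{
        set_need_write(3);
        printf("flags = %#lx\n", atomic_load(&flags));
        return 0;
}

Built with any C11 compiler (for example, cc -std=c11 flags.c), this prints flags = 0x8. Every hunk below applies the same before/after reshaping to a kernel cmpxchg(), atomic64_cmpxchg(), this_cpu_cmpxchg() or atomic_long_cmpxchg_acquire() loop.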
@@ -602,8 +602,8 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
         struct btree_cache *bc = &c->btree_cache;
         struct task_struct *old;
 
-        old = cmpxchg(&bc->alloc_lock, NULL, current);
-        if (old == NULL || old == current)
+        old = NULL;
+        if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current)
                 goto success;
 
         if (!cl) {
@@ -614,8 +614,8 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
         closure_wait(&bc->alloc_wait, cl);
 
         /* Try again, after adding ourselves to waitlist */
-        old = cmpxchg(&bc->alloc_lock, NULL, current);
-        if (old == NULL || old == current) {
+        old = NULL;
+        if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) {
                 /* We raced */
                 closure_wake_up(&bc->alloc_wait);
                 goto success;
@@ -1796,15 +1796,16 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
                                       struct btree_write *w)
 {
-        unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
+        unsigned long old, new;
 
+        old = READ_ONCE(b->will_make_reachable);
         do {
-                old = new = v;
+                new = old;
                 if (!(old & 1))
                         break;
 
                 new &= ~1UL;
-        } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
+        } while (!try_cmpxchg(&b->will_make_reachable, &old, new));
 
         if (old & 1)
                 closure_put(&((struct btree_update *) new)->cl);
@@ -1815,14 +1816,14 @@ static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
         struct btree_write *w = btree_prev_write(b);
-        unsigned long old, new, v;
+        unsigned long old, new;
         unsigned type = 0;
 
         bch2_btree_complete_write(c, b, w);
 
-        v = READ_ONCE(b->flags);
+        old = READ_ONCE(b->flags);
         do {
-                old = new = v;
+                new = old;
 
                 if ((old & (1U << BTREE_NODE_dirty)) &&
                     (old & (1U << BTREE_NODE_need_write)) &&
@@ -1842,7 +1843,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
                         new &= ~(1U << BTREE_NODE_write_in_flight);
                         new &= ~(1U << BTREE_NODE_write_in_flight_inner);
                 }
-        } while ((v = cmpxchg(&b->flags, old, new)) != old);
+        } while (!try_cmpxchg(&b->flags, &old, new));
 
         if (new & (1U << BTREE_NODE_write_in_flight))
                 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
@@ -2014,8 +2015,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
          * dirty bit requires a write lock, we can't race with other threads
          * redirtying it:
          */
+        old = READ_ONCE(b->flags);
         do {
-                old = new = READ_ONCE(b->flags);
+                new = old;
 
                 if (!(old & (1 << BTREE_NODE_dirty)))
                         return;
@@ -2046,7 +2048,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
                 new |= (1 << BTREE_NODE_write_in_flight_inner);
                 new |= (1 << BTREE_NODE_just_written);
                 new ^= (1 << BTREE_NODE_write_idx);
-        } while (cmpxchg_acquire(&b->flags, old, new) != old);
+        } while (!try_cmpxchg_acquire(&b->flags, &old, new));
 
         if (new & (1U << BTREE_NODE_need_write))
                 return;
@@ -228,14 +228,14 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
         struct btree_write *w = container_of(pin, struct btree_write, journal);
         struct btree *b = container_of(w, struct btree, writes[i]);
         struct btree_trans *trans = bch2_trans_get(c);
-        unsigned long old, new, v;
+        unsigned long old, new;
         unsigned idx = w - b->writes;
 
         btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-        v = READ_ONCE(b->flags);
 
+        old = READ_ONCE(b->flags);
         do {
-                old = new = v;
+                new = old;
 
                 if (!(old & (1 << BTREE_NODE_dirty)) ||
                     !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
@@ -245,7 +245,7 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
                 new &= ~BTREE_WRITE_TYPE_MASK;
                 new |= BTREE_WRITE_journal_reclaim;
                 new |= 1 << BTREE_NODE_need_write;
-        } while ((v = cmpxchg(&b->flags, old, new)) != old);
+        } while (!try_cmpxchg(&b->flags, &old, new));
 
         btree_node_write_if_need(c, b, SIX_LOCK_read);
         six_unlock_read(&b->c.lock);
@@ -1356,7 +1356,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
         struct bch_fs *c = as->c;
         struct bkey_packed *k;
         struct printbuf buf = PRINTBUF;
-        unsigned long old, new, v;
+        unsigned long old, new;
 
         BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
                !btree_ptr_sectors_written(insert));
@@ -1395,14 +1395,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
         bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
         set_btree_node_dirty_acct(c, b);
 
-        v = READ_ONCE(b->flags);
+        old = READ_ONCE(b->flags);
         do {
-                old = new = v;
+                new = old;
 
                 new &= ~BTREE_WRITE_TYPE_MASK;
                 new |= BTREE_WRITE_interior;
                 new |= 1 << BTREE_NODE_need_write;
-        } while ((v = cmpxchg(&b->flags, old, new)) != old);
+        } while (!try_cmpxchg(&b->flags, &old, new));
 
         printbuf_exit(&buf);
 }
@@ -916,13 +916,13 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
          */
         s64 should_not_have_added = added - (s64) disk_res_sectors;
         if (unlikely(should_not_have_added > 0)) {
-                u64 old, new, v = atomic64_read(&c->sectors_available);
+                u64 old, new;
 
+                old = atomic64_read(&c->sectors_available);
                 do {
-                        old = v;
                         new = max_t(s64, 0, old - should_not_have_added);
-                } while ((v = atomic64_cmpxchg(&c->sectors_available,
-                                               old, new)) != old);
+                } while (!atomic64_try_cmpxchg(&c->sectors_available,
+                                               &old, new));
 
                 added -= should_not_have_added;
                 warn = true;
@@ -1523,7 +1523,7 @@ int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                                u64 sectors, int flags)
 {
         struct bch_fs_pcpu *pcpu;
-        u64 old, v, get;
+        u64 old, get;
         s64 sectors_available;
         int ret;
 
@@ -1534,17 +1534,16 @@ int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
         if (sectors <= pcpu->sectors_available)
                 goto out;
 
-        v = atomic64_read(&c->sectors_available);
+        old = atomic64_read(&c->sectors_available);
         do {
-                old = v;
                 get = min((u64) sectors + SECTORS_CACHE, old);
 
                 if (get < sectors) {
                         preempt_enable();
                         goto recalculate;
                 }
-        } while ((v = atomic64_cmpxchg(&c->sectors_available,
-                                       old, old - get)) != old);
+        } while (!atomic64_try_cmpxchg(&c->sectors_available,
+                                       &old, old - get));
 
         pcpu->sectors_available += get;
 
@@ -432,13 +432,13 @@ static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reserv
 #ifdef __KERNEL__
         u64 old, new;
 
+        old = this_cpu_read(c->pcpu->sectors_available);
         do {
-                old = this_cpu_read(c->pcpu->sectors_available);
                 if (sectors > old)
                         return __bch2_disk_reservation_add(c, res, sectors, flags);
 
                 new = old - sectors;
-        } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+        } while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));
 
         this_cpu_add(*c->online_reserved, sectors);
         res->sectors += sectors;
@@ -69,11 +69,10 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
         u64 io_latency = time_after64(now, submit_time)
                 ? now - submit_time
                 : 0;
-        u64 old, new, v = atomic64_read(latency);
+        u64 old, new;
 
+        old = atomic64_read(latency);
         do {
-                old = v;
-
                 /*
                  * If the io latency was reasonably close to the current
                  * latency, skip doing the update and atomic operation - most of
@@ -84,7 +83,7 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
                         break;
 
                 new = ewma_add(old, io_latency, 5);
-        } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
+        } while (!atomic64_try_cmpxchg(latency, &old, new));
 
         bch2_congested_acct(ca, io_latency, now, rw);
 
@@ -230,7 +230,6 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
         struct journal_buf *buf = journal_cur_buf(j);
         union journal_res_state old, new;
-        u64 v = atomic64_read(&j->reservations.counter);
         unsigned sectors;
 
         BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
@@ -238,15 +237,16 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
 
         lockdep_assert_held(&j->lock);
 
+        old.v = atomic64_read(&j->reservations.counter);
         do {
-                old.v = new.v = v;
+                new.v = old.v;
                 new.cur_entry_offset = closed_val;
 
                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                     old.cur_entry_offset == new.cur_entry_offset)
                         return;
-        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                       old.v, new.v)) != old.v);
+        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                       &old.v, new.v));
 
         if (!__journal_entry_is_open(old))
                 return;
@@ -353,7 +353,6 @@ static int journal_entry_open(struct journal *j)
                 ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
         union journal_res_state old, new;
         int u64s;
-        u64 v;
 
         lockdep_assert_held(&j->lock);
         BUG_ON(journal_entry_is_open(j));
@@ -432,9 +431,9 @@ static int journal_entry_open(struct journal *j)
          */
         j->cur_entry_u64s = u64s;
 
-        v = atomic64_read(&j->reservations.counter);
+        old.v = atomic64_read(&j->reservations.counter);
         do {
-                old.v = new.v = v;
+                new.v = old.v;
 
                 BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
 
@@ -446,8 +445,8 @@ static int journal_entry_open(struct journal *j)
 
                 /* Handle any already added entries */
                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
-        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                       old.v, new.v)) != old.v);
+        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                       &old.v, new.v));
 
         if (nr_unwritten_journal_entries(j) == 1)
                 mod_delayed_work(j->wq,
@@ -327,10 +327,10 @@ static inline int journal_res_get_fast(struct journal *j,
                                        unsigned flags)
 {
         union journal_res_state old, new;
-        u64 v = atomic64_read(&j->reservations.counter);
 
+        old.v = atomic64_read(&j->reservations.counter);
         do {
-                old.v = new.v = v;
+                new.v = old.v;
 
                 /*
                  * Check if there is still room in the current journal
@@ -356,8 +356,8 @@ static inline int journal_res_get_fast(struct journal *j,
 
                 if (flags & JOURNAL_RES_GET_CHECK)
                         return 1;
-        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
-                                       old.v, new.v)) != old.v);
+        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                       &old.v, new.v));
 
         res->ref = true;
         res->idx = old.idx;
@@ -1588,7 +1588,7 @@ static CLOSURE_CALLBACK(journal_write_done)
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
         struct bch_replicas_padded replicas;
         union journal_res_state old, new;
-        u64 v, seq = le64_to_cpu(w->data->seq);
+        u64 seq = le64_to_cpu(w->data->seq);
         int err = 0;
 
         bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
@@ -1647,14 +1647,15 @@ static CLOSURE_CALLBACK(journal_write_done)
         if (j->watermark != BCH_WATERMARK_stripe)
                 journal_reclaim_kick(&c->journal);
 
-        v = atomic64_read(&j->reservations.counter);
+        old.v = atomic64_read(&j->reservations.counter);
         do {
-                old.v = new.v = v;
+                new.v = old.v;
                 BUG_ON(journal_state_count(new, new.unwritten_idx));
                 BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
 
                 new.unwritten_idx++;
-        } while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v);
+        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
+                                       &old.v, new.v));
 
         closure_wake_up(&w->wait);
         completed = true;
@@ -36,15 +36,14 @@ static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
 static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
 {
         long i = s ? 1 : -1;
-        long v = atomic_long_read(&lock->v), old;
+        long old;
 
+        old = atomic_long_read(&lock->v);
         do {
-                old = v;
 
-                if (i > 0 ? v < 0 : v > 0)
+                if (i > 0 ? old < 0 : old > 0)
                         return false;
-        } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
-                                old, old + i)) != old);
+        } while (!atomic_long_try_cmpxchg_acquire(&lock->v, &old, old + i));
 
         return true;
 }