sbitmap: Use atomic_long_try_cmpxchg in __sbitmap_queue_get_batch
Use atomic_long_try_cmpxchg instead of
atomic_long_cmpxchg (*ptr, old, new) == old in __sbitmap_queue_get_batch.
The x86 CMPXCHG instruction returns success in the ZF flag, so this change
saves a compare after cmpxchg (and the related move instruction in front
of cmpxchg).

Also, atomic_long_try_cmpxchg implicitly assigns the old *ptr value to
"old" when cmpxchg fails, enabling further code simplifications, e.g. an
extra memory read can be avoided in the loop.

No functional change intended.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Link: https://lore.kernel.org/r/20220908151200.9993-1-ubizjak@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 1de7c3cf48
commit c35227d4e8
@@ -533,16 +533,16 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 		nr = find_first_zero_bit(&map->word, map_depth);
 		if (nr + nr_tags <= map_depth) {
 			atomic_long_t *ptr = (atomic_long_t *) &map->word;
-			unsigned long val, ret;
+			unsigned long val;
 
 			get_mask = ((1UL << nr_tags) - 1) << nr;
+			val = READ_ONCE(map->word);
 			do {
-				val = READ_ONCE(map->word);
 				if ((val & ~get_mask) != val)
 					goto next;
-				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
-			} while (ret != val);
-			get_mask = (get_mask & ~ret) >> nr;
+			} while (!atomic_long_try_cmpxchg(ptr, &val,
+						get_mask | val));
+			get_mask = (get_mask & ~val) >> nr;
 			if (get_mask) {
 				*offset = nr + (index << sb->shift);
 				update_alloc_hint_after_get(sb, depth, hint,
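For illustration, the loop shape after the conversion can be reproduced with plain C11 atomics. This is a minimal user-space sketch, not kernel code: the function name set_bits_if_clear is hypothetical, and atomic_compare_exchange_strong() stands in for atomic_long_try_cmpxchg(). Both return a boolean success flag and, on failure, write the current value of the target back into the "expected" operand, which is why the explicit ret != val compare and the per-iteration READ_ONCE() can go away.

/* Hypothetical user-space sketch; mirrors the shape of the converted loop. */
#include <stdatomic.h>
#include <stdbool.h>

static bool set_bits_if_clear(atomic_ulong *word, unsigned long mask)
{
	/* One load up front; a failed exchange below refreshes "val". */
	unsigned long val = atomic_load(word);

	do {
		if (val & mask)		/* a requested bit is already set */
			return false;
		/*
		 * Like atomic_long_try_cmpxchg(): returns true on success and,
		 * on failure, stores the current *word into "val", so the loop
		 * needs no separate compare and no extra memory read.
		 */
	} while (!atomic_compare_exchange_strong(word, &val, val | mask));

	return true;
}

On x86, both the kernel primitive and the C11 call typically compile down to a LOCK CMPXCHG whose ZF result is the success flag the commit message refers to; the strong variant is used in the sketch purely for simplicity.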