mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
sbitmap: replace CAS with atomic and
sbitmap_deferred_clear() does a CAS loop to propagate cleared bits; replace it with an equivalent atomic bitwise AND. That's slightly faster and makes the operation wait-free instead of lock-free as it was before. The atomic can be relaxed (i.e. barrier-less) because the following sbitmap_get*() calls deal with synchronisation, see comments in sbitmap_queue_clear(). It's ok to cast to atomic_long_t, as that's what bitops/lock.h does. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
661d4f55a7
commit
c3250c8d24
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
|
||||
{
|
||||
unsigned long mask, val;
|
||||
unsigned long mask;
|
||||
|
||||
if (!READ_ONCE(map->cleared))
|
||||
return false;
|
||||
@@ -27,10 +27,8 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
|
||||
/*
|
||||
* Now clear the masked bits in our free word
|
||||
*/
|
||||
do {
|
||||
val = map->word;
|
||||
} while (cmpxchg(&map->word, val, val & ~mask) != val);
|
||||
|
||||
atomic_long_andnot(mask, (atomic_long_t *)&map->word);
|
||||
BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user