xarray: Add XArray unconditional store operations
xa_store() differs from radix_tree_insert() in that it will overwrite an existing element in the array rather than returning an error. This is the behaviour which most users want, and those that want more complex behaviour generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory from the slab allocator; if memory is not immediately available, it will drop the xa_lock and allocate memory, keeping a pointer in the xa_state. It does not use the per-CPU cache, although those will continue to exist until all radix tree users are converted to the xarray.

This patch also includes xa_erase() and __xa_erase() for a streamlined way to store NULL. Since there is no need to allocate memory in order to store a NULL in the XArray, we do not need to trouble the user with deciding what memory allocation flags to use.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 9b89a03551
commit 58d6ea3085
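As a quick orientation before the diff: the snippet below is a minimal, hypothetical usage sketch of the normal API this patch documents, built only from calls that appear in this commit (DEFINE_XARRAY(), xa_store(), xa_erase(), xa_err()). The array name and helper functions are illustrative, not part of the patch.

#include <linux/xarray.h>

static DEFINE_XARRAY(my_cache);         /* hypothetical array, not in this patch */

/* Store an object; any existing entry at @index is silently overwritten. */
static int my_cache_insert(unsigned long index, void *object)
{
        /* xa_err() turns an error entry into a negative errno, else 0. */
        return xa_err(xa_store(&my_cache, index, object, GFP_KERNEL));
}

/* Erasing never allocates, so no GFP flags are needed. */
static void my_cache_remove(unsigned long index)
{
        xa_erase(&my_cache, index);
}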
@@ -205,10 +205,17 @@ typedef unsigned __bitwise xa_mark_t;
#define XA_PRESENT ((__force xa_mark_t)8U)
#define XA_MARK_MAX XA_MARK_2

enum xa_lock_type {
        XA_LOCK_IRQ = 1,
        XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
                                (__force unsigned)(mark)))

@@ -267,6 +274,7 @@ struct xarray {

void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -309,6 +317,23 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
        return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
        return xa_store(xa, index, NULL, 0);
}

#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
@@ -322,11 +347,65 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
                                spin_unlock_irqrestore(&(xa)->xa_lock, flags)

/*
 * Versions of the normal API which require the caller to hold the xa_lock.
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
        void *entry;

        xa_lock_bh(xa);
        entry = __xa_erase(xa, index);
        xa_unlock_bh(xa);

        return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
        void *entry;

        xa_lock_irq(xa);
        entry = __xa_erase(xa, index);
        xa_unlock_irq(xa);

        return entry;
}

/* Everything below here is the Advanced API. Proceed with caution. */

/*
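To make the locked variants above concrete, here is a hypothetical sketch (not part of the patch) of updating two related indices under a single hold of the xa_lock using __xa_store(). GFP_ATOMIC keeps the lock held across the allocation; a sleeping GFP would let __xa_store() drop and retake the lock as described in the comment above. The function name is illustrative only.

static int pair_store(struct xarray *xa, unsigned long index, void *a, void *b)
{
        int err;

        xa_lock(xa);
        /* __xa_store() expects the xa_lock to already be held. */
        err = xa_err(__xa_store(xa, index, a, GFP_ATOMIC));
        if (!err)
                err = xa_err(__xa_store(xa, index + 1, b, GFP_ATOMIC));
        xa_unlock(xa);

        return err;
}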
@@ -441,6 +520,12 @@ static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
                                lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
        return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
@@ -647,6 +732,12 @@ static inline bool xas_not_node(struct xa_node *node)
        return ((unsigned long)node & 3) || !node;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
        return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
@@ -683,10 +774,14 @@ static inline bool xas_retry(struct xa_state *xas, const void *entry)
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);

/**
 * xas_reload() - Refetch an entry from the xarray.
@@ -711,4 +806,52 @@ static inline void *xas_reload(struct xa_state *xas)
        return xa_head(xas->xa);
}

/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
        xas->xa_index = index;
        xas->xa_node = XAS_RESTART;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
                                unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
        xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
        xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
        xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
        xas->xa_node = XAS_RESTART;
#else
        BUG_ON(order > 0);
        xas_set(xas, index);
#endif
}

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
        xas->xa_update = update;
}

#endif /* _LINUX_XARRAY_H */
@@ -47,7 +47,7 @@ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
@@ -365,7 +365,7 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
        return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
void radix_tree_node_rcu_free(struct rcu_head *head)
{
        struct radix_tree_node *node =
                        container_of(head, struct radix_tree_node, rcu_head);
@@ -30,13 +30,49 @@ void xa_dump(const struct xarray *xa) { }

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
        radix_tree_insert(xa, index, xa_mk_value(index));
        return NULL;
        return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
        radix_tree_delete(xa, index);
        XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
        XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
                unsigned order, void *entry, gfp_t gfp)
{
        XA_STATE_ORDER(xas, xa, index, order);
        void *curr;

        do {
                xas_lock(&xas);
                curr = xas_store(&xas, entry);
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return curr;
}

static noinline void check_xa_err(struct xarray *xa)
{
        XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
        XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
        /* The kernel does not fail GFP_NOWAIT allocations */
        XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
        XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
        XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
        XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
        XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//      XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xa_load(struct xarray *xa)
@@ -69,6 +105,9 @@ static noinline void check_xa_load(struct xarray *xa)

static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
        unsigned int order;
        unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

        /* NULL elements have no marks set */
        XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
        xa_set_mark(xa, index, XA_MARK_0);
@@ -90,6 +129,37 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
        XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

        /*
         * Storing a multi-index entry over entries with marks gives the
         * entire entry the union of the marks
         */
        BUG_ON((index % 4) != 0);
        for (order = 2; order < max_order; order++) {
                unsigned long base = round_down(index, 1UL << order);
                unsigned long next = base + (1UL << order);
                unsigned long i;

                XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
                xa_set_mark(xa, index + 1, XA_MARK_0);
                XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
                xa_set_mark(xa, index + 2, XA_MARK_1);
                XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
                xa_store_order(xa, index, order, xa_mk_value(index),
                                GFP_KERNEL);
                for (i = base; i < next; i++) {
                        XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
                        XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
                        XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
                }
                XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
                XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
                XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
                xa_erase_index(xa, index);
                xa_erase_index(xa, next);
                XA_BUG_ON(xa, !xa_empty(xa));
        }
        XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark(struct xarray *xa)
@@ -100,12 +170,111 @@ static noinline void check_xa_mark(struct xarray *xa)
                check_xa_mark_1(xa, index);
}

static RADIX_TREE(array, GFP_KERNEL);
static noinline void check_xa_shrink(struct xarray *xa)
{
        XA_STATE(xas, xa, 1);
        struct xa_node *node;

        XA_BUG_ON(xa, !xa_empty(xa));
        XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
        XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

        /*
         * Check that erasing the entry at 1 shrinks the tree and properly
         * marks the node as being deleted.
         */
        xas_lock(&xas);
        XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
        node = xas.xa_node;
        XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
        XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
        XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
        XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
        XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
        XA_BUG_ON(xa, xas_load(&xas) != NULL);
        xas_unlock(&xas);
        XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
        xa_erase_index(xa, 0);
        XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
        unsigned long i, j, k;
        unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

        /* Loading from any position returns the same value */
        xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
        XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
        XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
        XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
        rcu_read_lock();
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
        rcu_read_unlock();

        /* Storing adjacent to the value does not alter the value */
        xa_store(xa, 3, xa, GFP_KERNEL);
        XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
        XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
        XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
        rcu_read_lock();
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
        rcu_read_unlock();

        /* Overwriting multiple indexes works */
        xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
        XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
        XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
        XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
        XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
        XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
        rcu_read_lock();
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
        XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
        rcu_read_unlock();

        /* We can erase multiple values with a single store */
        xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
        XA_BUG_ON(xa, !xa_empty(xa));

        /* Even when the first slot is empty but the others aren't */
        xa_store_index(xa, 1, GFP_KERNEL);
        xa_store_index(xa, 2, GFP_KERNEL);
        xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
        XA_BUG_ON(xa, !xa_empty(xa));

        for (i = 0; i < max_order; i++) {
                for (j = 0; j < max_order; j++) {
                        xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
                        xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);

                        for (k = 0; k < max_order; k++) {
                                void *entry = xa_load(xa, (1UL << k) - 1);
                                if ((i < k) && (j < k))
                                        XA_BUG_ON(xa, entry != NULL);
                                else
                                        XA_BUG_ON(xa, entry != xa_mk_value(j));
                        }

                        xa_erase(xa, 0);
                        XA_BUG_ON(xa, !xa_empty(xa));
                }
        }
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
        check_xa_err(&array);
        check_xa_load(&array);
        check_xa_mark(&array);
        check_xa_shrink(&array);
        check_multi_store(&array);

        printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
        return (tests_run == tests_passed) ? 0 : -EINVAL;
lib/xarray.c (693 lines changed)
@@ -7,6 +7,8 @@

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
@@ -25,6 +27,31 @@
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
        return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_lock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_lock_bh(xas);
        else
                xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_unlock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_unlock_bh(xas);
        else
                xas_unlock(xas);
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
        if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -67,6 +94,34 @@ static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
        return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
        mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
        unsigned int mark = 0;
        unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

        if (!xas->xa_sibs)
                return;

        do {
                unsigned long *marks = xas->xa_node->marks[mark];
                if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
                        continue;
                __set_bit(xas->xa_offset, marks);
                bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
        } while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
@@ -161,6 +216,516 @@ void *xas_load(struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_load);

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE ((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->array = XA_RCU_FREE;
        call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_alloc;

        if (!node)
                return;
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        kmem_cache_free(radix_tree_node_cachep, node);
        xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
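A hypothetical caller-side sketch of the retry protocol documented above (it mirrors the loop used by xa_store() later in this file and by xa_store_order() in the test suite); the function name and parameters are illustrative only.

static void *example_store(struct xarray *xa, unsigned long index,
                void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        do {
                xas_lock(&xas);
                /* Sets -ENOMEM in the xa_state if a node allocation fails. */
                curr = xas_store(&xas, entry);
                xas_unlock(&xas);
                /* xas_nomem() allocates outside the lock and asks us to retry. */
        } while (xas_nomem(&xas, gfp));

        return curr;
}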
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
        __must_hold(xas->xa->xa_lock)
{
        unsigned int lock_type = xa_lock_type(xas->xa);

        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (gfpflags_allow_blocking(gfp)) {
                xas_unlock_type(xas, lock_type);
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
                xas_lock_type(xas, lock_type);
        } else {
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        }
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
        if (xas->xa_update)
                xas->xa_update(node);
        else
                XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
        struct xa_node *parent = xas->xa_node;
        struct xa_node *node = xas->xa_alloc;

        if (xas_invalid(xas))
                return NULL;

        if (node) {
                xas->xa_alloc = NULL;
        } else {
                node = kmem_cache_alloc(radix_tree_node_cachep,
                                GFP_NOWAIT | __GFP_NOWARN);
                if (!node) {
                        xas_set_err(xas, -ENOMEM);
                        return NULL;
                }
        }

        if (parent) {
                node->offset = xas->xa_offset;
                parent->count++;
                XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
                xas_update(xas, parent);
        }
        XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->shift = shift;
        node->count = 0;
        node->nr_values = 0;
        RCU_INIT_POINTER(node->parent, xas->xa_node);
        node->array = xas->xa;

        return node;
}

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
        unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
        if (xas->xa_shift || xas->xa_sibs) {
                unsigned long mask;
                mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
                max |= mask;
                if (mask == max)
                        max++;
        }
#endif

        return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
        if (!xa_is_node(entry))
                return 0;
        return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = xas->xa_node;

        for (;;) {
                void *entry;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count != 1)
                        break;
                entry = xa_entry_locked(xa, node, 0);
                if (!entry)
                        break;
                if (!xa_is_node(entry) && node->shift)
                        break;
                xas->xa_node = XAS_BOUNDS;

                RCU_INIT_POINTER(xa->xa_head, entry);

                node->count = 0;
                node->nr_values = 0;
                if (!xa_is_node(entry))
                        RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
                xas_update(xas, node);
                xa_node_free(node);
                if (!xa_is_node(entry))
                        break;
                node = xa_to_node(entry);
                node->parent = NULL;
        }
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        for (;;) {
                struct xa_node *parent;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count)
                        break;

                parent = xa_parent_locked(xas->xa, node);
                xas->xa_node = parent;
                xas->xa_offset = node->offset;
                xa_node_free(node);

                if (!parent) {
                        xas->xa->xa_head = NULL;
                        xas->xa_node = XAS_BOUNDS;
                        return;
                }

                parent->slots[xas->xa_offset] = NULL;
                parent->count--;
                XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
                node = parent;
                xas_update(xas, node);
        }

        if (!node->parent)
                xas_shrink(xas);
}
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
        unsigned int offset = 0;
        struct xa_node *node = top;

        for (;;) {
                void *entry = xa_entry_locked(xas->xa, node, offset);

                if (xa_is_node(entry)) {
                        node = xa_to_node(entry);
                        offset = 0;
                        continue;
                }
                if (entry)
                        RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
                offset++;
                while (offset == XA_CHUNK_SIZE) {
                        struct xa_node *parent;

                        parent = xa_parent_locked(xas->xa, node);
                        offset = node->offset + 1;
                        node->count = 0;
                        node->nr_values = 0;
                        xas_update(xas, node);
                        xa_node_free(node);
                        if (node == top)
                                return;
                        node = parent;
                }
        }
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = NULL;
        unsigned int shift = 0;
        unsigned long max = xas_max(xas);

        if (!head) {
                if (max == 0)
                        return 0;
                while ((max >> shift) >= XA_CHUNK_SIZE)
                        shift += XA_CHUNK_SHIFT;
                return shift + XA_CHUNK_SHIFT;
        } else if (xa_is_node(head)) {
                node = xa_to_node(head);
                shift = node->shift + XA_CHUNK_SHIFT;
        }
        xas->xa_node = NULL;

        while (max > max_index(head)) {
                xa_mark_t mark = 0;

                XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
                node = xas_alloc(xas, shift);
                if (!node)
                        return -ENOMEM;

                node->count = 1;
                if (xa_is_value(head))
                        node->nr_values = 1;
                RCU_INIT_POINTER(node->slots[0], head);

                /* Propagate the aggregated mark info to the new child */
                for (;;) {
                        if (xa_marked(xa, mark))
                                node_set_mark(node, 0, mark);
                        if (mark == XA_MARK_MAX)
                                break;
                        mark_inc(mark);
                }

                /*
                 * Now that the new node is fully initialised, we can add
                 * it to the tree
                 */
                if (xa_is_node(head)) {
                        xa_to_node(head)->offset = 0;
                        rcu_assign_pointer(xa_to_node(head)->parent, node);
                }
                head = xa_mk_node(node);
                rcu_assign_pointer(xa->xa_head, head);
                xas_update(xas, node);

                shift += XA_CHUNK_SHIFT;
        }

        xas->xa_node = node;
        return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL. If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
        struct xarray *xa = xas->xa;
        void *entry;
        void __rcu **slot;
        struct xa_node *node = xas->xa_node;
        int shift;
        unsigned int order = xas->xa_shift;

        if (xas_top(node)) {
                entry = xa_head_locked(xa);
                xas->xa_node = NULL;
                shift = xas_expand(xas, entry);
                if (shift < 0)
                        return NULL;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        } else if (xas_error(xas)) {
                return NULL;
        } else if (node) {
                unsigned int offset = xas->xa_offset;

                shift = node->shift;
                entry = xa_entry_locked(xa, node, offset);
                slot = &node->slots[offset];
        } else {
                shift = 0;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        }

        while (shift > order) {
                shift -= XA_CHUNK_SHIFT;
                if (!entry) {
                        node = xas_alloc(xas, shift);
                        if (!node)
                                break;
                        rcu_assign_pointer(*slot, xa_mk_node(node));
                } else if (xa_is_node(entry)) {
                        node = xa_to_node(entry);
                } else {
                        break;
                }
                entry = xas_descend(xas, node);
                slot = &node->slots[xas->xa_offset];
        }

        return entry;
}
static void update_node(struct xa_state *xas, struct xa_node *node,
                int count, int values)
{
        if (!node || (!count && !values))
                return;

        node->count += count;
        node->nr_values += values;
        XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
        XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
        xas_update(xas, node);
        if (count < 0)
                xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
        struct xa_node *node;
        void __rcu **slot = &xas->xa->xa_head;
        unsigned int offset, max;
        int count = 0;
        int values = 0;
        void *first, *next;
        bool value = xa_is_value(entry);

        if (entry)
                first = xas_create(xas);
        else
                first = xas_load(xas);

        if (xas_invalid(xas))
                return first;
        node = xas->xa_node;
        if (node && (xas->xa_shift < node->shift))
                xas->xa_sibs = 0;
        if ((first == entry) && !xas->xa_sibs)
                return first;

        next = first;
        offset = xas->xa_offset;
        max = xas->xa_offset + xas->xa_sibs;
        if (node) {
                slot = &node->slots[offset];
                if (xas->xa_sibs)
                        xas_squash_marks(xas);
        }
        if (!entry)
                xas_init_marks(xas);

        for (;;) {
                /*
                 * Must clear the marks before setting the entry to NULL,
                 * otherwise xas_for_each_marked may find a NULL entry and
                 * stop early. rcu_assign_pointer contains a release barrier
                 * so the mark clearing will appear to happen before the
                 * entry is set to NULL.
                 */
                rcu_assign_pointer(*slot, entry);
                if (xa_is_node(next))
                        xas_free_nodes(xas, xa_to_node(next));
                if (!node)
                        break;
                count += !next - !entry;
                values += !xa_is_value(first) - !value;
                if (entry) {
                        if (offset == max)
                                break;
                        if (!xa_is_sibling(entry))
                                entry = xa_mk_sibling(xas->xa_offset);
                } else {
                        if (offset == XA_CHUNK_MASK)
                                break;
                }
                next = xa_entry_locked(xas->xa, node, ++offset);
                if (!xa_is_sibling(next)) {
                        if (!entry && (offset > max))
                                break;
                        first = next;
                }
                slot++;
        }

        update_node(xas, node, count, values);
        return first;
}
EXPORT_SYMBOL_GPL(xas_store);

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
@@ -240,6 +805,30 @@ void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas. If we're tracking
 * free entries with a mark, we need to set it on all entries. All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
        xa_mark_t mark = 0;

        for (;;) {
                xas_clear_mark(xas, mark);
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
@@ -253,9 +842,19 @@ EXPORT_SYMBOL_GPL(xas_clear_mark);
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
        unsigned int lock_type;
        static struct lock_class_key xa_lock_irq;
        static struct lock_class_key xa_lock_bh;

        spin_lock_init(&xa->xa_lock);
        xa->xa_flags = flags;
        xa->xa_head = NULL;

        lock_type = xa_lock_type(xa);
        if (lock_type == XA_LOCK_IRQ)
                lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
        else if (lock_type == XA_LOCK_BH)
                lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);

@@ -282,6 +881,100 @@ void *xa_load(struct xarray *xa, unsigned long index)
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
        XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
        if (xas_error(xas))
                curr = xas->xa_node;
        return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 * This function expects the xa_lock to be held on entry.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL_GPL(__xa_erase);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);

        do {
                xas_lock(&xas);
                curr = xas_store(&xas, entry);
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_store);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);

        do {
                curr = xas_store(&xas, entry);
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
@@ -15,6 +15,7 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);

#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
@@ -37,4 +37,8 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
        return true;
}

#include <linux/lockdep.h>

#endif
@@ -5,7 +5,7 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
        tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
tools/testing/radix-tree/bitmap.c (new file, 23 lines)
@@ -0,0 +1,23 @@
/* lib/bitmap.c pulls in at least two other files. */

#include <linux/bitmap.h>

void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned int size = start + len;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_clear >= 0) {
                *p &= ~mask_to_clear;
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (len) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                *p &= ~mask_to_clear;
        }
}
@@ -18,4 +18,8 @@
#define pr_debug printk
#define pr_cont printk

#define __acquires(x)
#define __releases(x)
#define __must_hold(x)

#endif /* _KERNEL_H */
tools/testing/radix-tree/linux/lockdep.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef _LINUX_LOCKDEP_H
#define _LINUX_LOCKDEP_H
struct lock_class_key {
        unsigned int a;
};

static inline void lockdep_set_class(spinlock_t *lock,
                struct lock_class_key *key)
{
}
#endif /* _LINUX_LOCKDEP_H */