// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <kunit/test.h>

#include <linux/module.h>
#include <linux/xarray.h>

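/*
 * Multi-index entries are only available when the kernel is built with
 * CONFIG_XARRAY_MULTI; without it only order-0 stores work, so the
 * order-based tests are limited accordingly.
 */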
static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
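/*
 * Report failures through KUnit rather than BUG(). This expands to
 * KUNIT_FAIL(), so it relies on a 'struct kunit *test' being in scope
 * at every call site; a failed check also dumps the tree (when XA_DEBUG
 * provides a real xa_dump()) and a stack trace instead of crashing.
 */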
#define XA_BUG_ON(xa, x) do {					\
	if (x) {						\
		KUNIT_FAIL(test, #x);				\
		xa_dump(xa);					\
		dump_stack();					\
	}							\
} while (0)
#endif

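/*
 * Value entries can only encode values up to LONG_MAX (the low bit is
 * used as the value tag), so mask the index before converting it.
 */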
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

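/*
 * Store an entry which encodes its own index, so that later loads and
 * erases can verify they found the entry they expected.
 */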
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

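/*
 * Each test in this suite is parameterised by the xarray under test;
 * KUnit hands it to us through the test context.
 */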
static inline struct xarray *xa_param(struct kunit *test)
{
	return *(struct xarray **)test->param_value;
}

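/*
 * xa_err() extracts the errno from an error entry returned by the
 * store functions; a successful store yields an error of 0.
 */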
static noinline void check_xa_err(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

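/*
 * Retry entries are left in the slots of a node which has been removed
 * from the tree; a reader which finds one under RCU must restart its
 * walk from the head of the array.
 */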
static noinline void check_xas_retry(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(test, xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(test, xa, 0);
	xa_erase_index(test, xa, 1);
}

static noinline void check_xa_load(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(test, xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_1(struct kunit *test, unsigned long index)
{
	struct xarray *xa = xa_param(test);

	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(test, xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(test, xa, index);
		xa_erase_index(test, xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark_3(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct kunit *test)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(test, index);

	check_xa_mark_2(test);
	check_xa_mark_3(test);
}

static noinline void check_xa_shrink(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(test, xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(test, xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(test, xa, 0);
	}
}

static noinline void check_insert(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(test, xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(test, xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(test, xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(test, xa, 1UL << i);

		xa_insert_index(test, xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(test, xa, (1UL << i) - 1);
	}

	xa_insert_index(test, xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(test, xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(test, xa, 12345678);
	xa_erase_index(test, xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg_order(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check entry FIVE has the order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, keep the node information and the order at
	 * '1 << order'
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at FIVE (and old) entries */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_reserve(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(test, xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(test, xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

static noinline void check_xas_erase(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(test, xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(test, 200, i);
		check_multi_store_1(test, 0, i);
		check_multi_store_1(test, (1UL << i) + 1, i);
	}
	check_multi_store_2(test, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(test, 0, i);
		check_multi_store_3(test, 1UL << i, i);
	}
#endif
}

#ifdef CONFIG_XARRAY_MULTI
/* mimics page cache __filemap_add_folio() */
static noinline void check_xa_multi_store_adv_add(struct kunit *test,
				unsigned long index,
				unsigned int order,
				void *p)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for index alignment to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, p);
		xas_unlock_irq(&xas);
		/*
		 * In our selftest case the only failure we can expect is for
		 * there not to be enough memory as we're not mimicking the
		 * entire page cache, so verify that's the only error we can
		 * run into here. The xas_nomem() which follows will fix that
		 * condition for us so we can chug on round the loop.
		 */
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, xa_load(xa, index) != p);
}

/* mimics page_cache_delete() */
static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test,
				unsigned long index,
				unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}

static noinline void check_xa_multi_store_adv_delete(struct kunit *test,
				unsigned long index,
				unsigned int order)
{
	struct xarray *xa = xa_param(test);

	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(test, index, order);
	xa_unlock_irq(xa);
}

/* mimics page cache filemap_get_entry() */
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *p;
	static unsigned int loops = 0;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	p = xas_load(&xas);
	if (xas_retry(&xas, p))
		goto repeat;
	rcu_read_unlock();

	/*
	 * This is not part of the page cache: this selftest is pretty
	 * aggressive and does not want to trust the xarray API but rather
	 * test it, and for order 20 (4 GiB block size) we can loop over
	 * more than a million entries, which can cause a soft lockup.
	 * Proper page cache APIs are not this naive; they loop over the
	 * proper order, so when using a larger order they skip the shared
	 * entries.
	 */
	if (++loops % XA_CHECK_SCHED == 0)
		schedule();

	return p;
}

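/*
 * Arbitrary static variables whose addresses stand in for folio
 * pointers in the page-cache-style tests below; only the addresses
 * matter, not the values.
 */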
static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;

/* mimics the page cache usage */
static noinline void check_xa_multi_store_adv(struct kunit *test,
				unsigned long pos,
				unsigned int order)
{
	struct xarray *xa = xa_param(test);

	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(test, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(test, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(test, next_index, order);
	check_xa_multi_store_adv_delete(test, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(test, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(test, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
#endif

static noinline void check_multi_store_advanced(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned long end = ULONG_MAX/2;
	unsigned long pos, i;

	/*
	 * About 117 million tests below.
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
			check_xa_multi_store_adv(test, pos, i);
			check_xa_multi_store_adv(test, pos + 157, i);
		}
	}
#endif
}

static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(test, xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(test, xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(test, xa, i);
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(test, xa, base + 1, GFP_KERNEL);
	xa_erase_index(test, xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(test, xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(test, xa, base);
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	xa_erase_index(test, xa, base + 1);
	xa_erase_index(test, xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(test, xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(test, xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(test, xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(test, xa, 0x3ffd);
	xa_erase_index(test, xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(test, xa, base);
	xa_erase_index(test, xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(test, xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

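/* xa0 allocates IDs starting from 0, xa1 from 1, so both bases are tested. */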
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(struct kunit *test)
{
	check_xa_alloc_1(test, &xa0, 0);
	check_xa_alloc_1(test, &xa1, 1);
	check_xa_alloc_2(test, &xa0, 0);
	check_xa_alloc_2(test, &xa1, 1);
	check_xa_alloc_3(test, &xa0, 0);
	check_xa_alloc_3(test, &xa1, 1);
}

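/*
 * Check that xas_for_each_conflict() visits every entry which would be
 * overwritten by a multi-index store of the given order at 'start', and
 * that exactly 'present' such entries are found.
 */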
static noinline void __check_store_iter(struct kunit *test, unsigned long start,
			unsigned int order, unsigned int present)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(test, xa, start);
}

static noinline void check_store_iter(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(test, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(test, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(test, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(test, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(test, 64, 2, 1);
	xa_erase_index(test, xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find_1(struct kunit *test, unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(test, xa, multi);
	xa_erase_index(test, xa, next);
	xa_erase_index(test, xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(test, xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(test, xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(test, xa, 0);
	}
}

static noinline void check_find_1(struct kunit *test)
|
2017-11-14 13:30:11 +00:00
|
|
|
{
|
2024-12-05 15:11:26 +00:00
|
|
|
struct xarray *xa = xa_param(test);
|
|
|
|
|
2017-11-14 13:30:11 +00:00
|
|
|
unsigned long i, j, k;
|
|
|
|
|
|
|
|
XA_BUG_ON(xa, !xa_empty(xa));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check xa_find with all pairs between 0 and 99 inclusive,
|
|
|
|
* starting at every index between 0 and 99
|
|
|
|
*/
|
|
|
|
for (i = 0; i < 100; i++) {
|
|
|
|
XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
|
|
|
|
xa_set_mark(xa, i, XA_MARK_0);
|
|
|
|
for (j = 0; j < i; j++) {
|
|
|
|
XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
|
|
|
|
NULL);
|
|
|
|
xa_set_mark(xa, j, XA_MARK_0);
|
|
|
|
for (k = 0; k < 100; k++) {
|
|
|
|
unsigned long index = k;
|
|
|
|
void *entry = xa_find(xa, &index, ULONG_MAX,
|
|
|
|
XA_PRESENT);
|
|
|
|
if (k <= j)
|
|
|
|
XA_BUG_ON(xa, index != j);
|
|
|
|
else if (k <= i)
|
|
|
|
XA_BUG_ON(xa, index != i);
|
|
|
|
else
|
|
|
|
XA_BUG_ON(xa, entry != NULL);
|
|
|
|
|
|
|
|
index = k;
|
|
|
|
entry = xa_find(xa, &index, ULONG_MAX,
|
|
|
|
XA_MARK_0);
|
|
|
|
if (k <= j)
|
|
|
|
XA_BUG_ON(xa, index != j);
|
|
|
|
else if (k <= i)
|
|
|
|
XA_BUG_ON(xa, index != i);
|
|
|
|
else
|
|
|
|
XA_BUG_ON(xa, entry != NULL);
|
|
|
|
}
|
2024-12-05 15:11:26 +00:00
|
|
|
xa_erase_index(test, xa, j);
|
2017-11-14 13:30:11 +00:00
|
|
|
XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
|
|
|
|
XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
|
|
|
|
}
|
2024-12-05 15:11:26 +00:00
|
|
|
xa_erase_index(test, xa, i);
|
2017-11-14 13:30:11 +00:00
|
|
|
XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
|
|
|
|
}
|
|
|
|
XA_BUG_ON(xa, !xa_empty(xa));
|
2018-11-01 20:55:19 +00:00
|
|
|
}
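
/* Iteration must visit every stored index, in order, exactly once. */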
static noinline void check_find_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}
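
/*
 * A marked search whose start is beyond its maximum must leave the
 * xa_state at XAS_RESTART.
 */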
static noinline void check_find_3(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
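
/* xa_find_after() must not wrap around once it has seen ULONG_MAX. */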
static noinline void check_find_4(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(test, xa, ULONG_MAX);
}

static noinline void check_find(struct kunit *test)
{
	unsigned i;

	check_find_1(test);
	check_find_2(test);
	check_find_3(test);
	check_find_4(test);

	for (i = 2; i < 10; i++)
		check_multi_find_1(test, i);
	check_multi_find_2(test);
	check_multi_find_3(test);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
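
/*
 * Pausing after every entry with xas_pause() must yield exactly the
 * same sequence of entries as an uninterrupted iteration, including
 * over multi-index entries.
 */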
static noinline void check_pause(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);

	/*
	 * Check that xas_pause() masks the bits below the node shift when
	 * it advances the index: an iteration that starts in the middle of
	 * a multi-index entry must not skip the entry that follows it (see
	 * commit "Xarray: move forward index correctly in xas_pause()").
	 */
	index = 0;
	for (order = XA_CHUNK_SHIFT; order > 0; order--) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	index = 0;
	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		index += 1UL << (XA_CHUNK_SHIFT - count);
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);

	index = 0;
	count = 0;
	xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		index += 1UL << (XA_CHUNK_SHIFT - count);
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);

	xa_destroy(xa);
}
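
/*
 * Step forwards and backwards over an empty array, then over a single
 * entry at index 0.
 */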
static noinline void check_move_tiny(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(test, xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
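
/*
 * With entries at 0 and idx only, stepping must report the correct
 * index and entry at every position, and wrap cleanly past ULONG_MAX
 * in both directions.
 */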
static noinline void check_move_small(struct kunit *test, unsigned long idx)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(test, xa, 0);
	xa_erase_index(test, xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
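
/*
 * Walk a 2^16-entry array backwards and forwards, then repeat with a
 * large hole punched in the middle.
 */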
static noinline void check_move(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(test, xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(test);
	check_move_max(test);

	for (i = 0; i < 16; i++)
		check_move_small(test, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(test, (1UL << i) - 1);
}
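
/*
 * Store 2^order consecutive entries under a single lock hold, using
 * xas_create_range() to create the slots up front.
 */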
static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct kunit *test,
		unsigned long index, unsigned order)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;

	xa_store_many_order(test, xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct kunit *test, unsigned int order)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(test, xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(struct kunit *test)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
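
/*
 * Creating a range on top of an existing entry must preserve that
 * entry; every other slot in the range must start out empty.
 */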
static noinline void check_create_range_4(struct kunit *test,
		unsigned long index, unsigned order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_5(struct kunit *test,
		unsigned long index, unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct kunit *test)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(test, 0, order);
		check_create_range_1(test, 1U << order, order);
		check_create_range_1(test, 2U << order, order);
		check_create_range_1(test, 3U << order, order);
		check_create_range_1(test, 1U << 24, order);
		if (order < 10)
			check_create_range_2(test, order);

		check_create_range_4(test, 0, order);
		check_create_range_4(test, 1U << order, order);
		check_create_range_4(test, 2U << order, order);
		check_create_range_4(test, 3U << order, order);
		check_create_range_4(test, 1U << 24, order);

		check_create_range_4(test, 1, order);
		check_create_range_4(test, (1U << order) + 1, order);
		check_create_range_4(test, (2U << order) + 1, order);
		check_create_range_4(test, (2U << order) - 1, order);
		check_create_range_4(test, (3U << order) + 1, order);
		check_create_range_4(test, (3U << order) - 1, order);
		check_create_range_4(test, (1U << 24) + 1, order);

		check_create_range_5(test, 0, order);
		check_create_range_5(test, (1U << order), order);
	}

	check_create_range_3(test);
}

static noinline void __check_store_range(struct kunit *test, unsigned long first,
		unsigned long last)
{
	struct xarray *xa = xa_param(test);

#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct kunit *test)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(test, i, j);
			__check_store_range(test, 128 + i, 128 + j);
			__check_store_range(test, 4095 + i, 4095 + j);
			__check_store_range(test, 4096 + i, 4096 + j);
			__check_store_range(test, 123456 + i, 123456 + j);
			__check_store_range(test, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

#ifdef CONFIG_XARRAY_MULTI
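/*
 * Split one entry of the given order into entries of new_order and
 * check the values, marks and iteration behaviour of the pieces.
 */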
static void check_split_1(struct kunit *test, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}

static noinline void check_split(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(test, 0, order, new_order);
			check_split_1(test, 1UL << order, order, new_order);
			check_split_1(test, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct kunit *test) { }
#endif

static void check_align_1(struct kunit *test, char *name)
{
	struct xarray *xa = xa_param(test);

	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct kunit *test, char *name)
{
	struct xarray *xa = xa_param(test);

	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct kunit *test)
{
	char name[] = "Motorola 68000";

	check_align_1(test, name);
	check_align_1(test, name + 1);
	check_align_1(test, name + 2);
	check_align_1(test, name + 3);
	check_align_2(test, name);
}

static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct kunit *test, struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

struct workingset_testcase {
	struct xarray *xa;
	unsigned long index;
};
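
/*
 * test_update_node() keeps nodes that contain only value entries on
 * shadow_nodes; storing a non-value entry in a node must take it back
 * off the list.
 */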
static noinline void check_workingset(struct kunit *test)
{
	struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value;
	struct xarray *xa = tc.xa;
	unsigned long index = tc.index;

	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(test, xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
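
/* xa_get_order() must report the order an entry was stored with. */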
static noinline void check_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}
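
/*
 * As above, but through the advanced API: xas_get_order() after
 * xas_load() must report the stored order for every covered index.
 */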
static noinline void check_xas_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_xas_conflict_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order works with xas_for_each_conflict.
			 */
			j = i << order;
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_destroy(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);
static struct xarray *arrays[] = { &array };
KUNIT_ARRAY_PARAM(array, arrays, NULL);

static struct xarray *xa0s[] = { &xa0 };
KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);

static struct workingset_testcase workingset_testcases[] = {
	{ &array, 0 },
	{ &array, 64 },
	{ &array, 4096 },
};
KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);
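
/*
 * Each KUNIT_ARRAY_PARAM() above generates the corresponding
 * *_gen_params() helper referenced in the case table below.
 */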
static struct kunit_case xarray_cases[] = {
	KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
	KUNIT_CASE_PARAM(check_insert, array_gen_params),
	KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
	KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
	KUNIT_CASE_PARAM(check_reserve, array_gen_params),
	KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
	KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
	KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params),
	KUNIT_CASE_PARAM(check_get_order, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params),
	KUNIT_CASE(check_xa_alloc),
	KUNIT_CASE_PARAM(check_find, array_gen_params),
	KUNIT_CASE_PARAM(check_find_entry, array_gen_params),
	KUNIT_CASE_PARAM(check_pause, array_gen_params),
	KUNIT_CASE_PARAM(check_account, array_gen_params),
	KUNIT_CASE_PARAM(check_destroy, array_gen_params),
	KUNIT_CASE_PARAM(check_move, array_gen_params),
	KUNIT_CASE_PARAM(check_create_range, array_gen_params),
	KUNIT_CASE_PARAM(check_store_range, array_gen_params),
	KUNIT_CASE_PARAM(check_store_iter, array_gen_params),
	KUNIT_CASE_PARAM(check_align, xa0_gen_params),
	KUNIT_CASE_PARAM(check_split, array_gen_params),
	KUNIT_CASE_PARAM(check_workingset, workingset_gen_params),
	{},
};

static struct kunit_suite xarray_suite = {
	.name = "xarray",
	.test_cases = xarray_cases,
};

kunit_test_suite(xarray_suite);

MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");