2022-02-02 12:03:10 +01:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
#ifndef _MEMBLOCK_TEST_H
|
|
|
|
#define _MEMBLOCK_TEST_H
|
|
|
|
|
2022-02-28 15:46:44 +01:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <assert.h>
|
2022-02-02 12:03:10 +01:00
|
|
|
#include <linux/types.h>
|
2023-09-14 10:45:40 +03:00
|
|
|
#include <linux/seq_file.h>
|
2022-02-02 12:03:10 +01:00
|
|
|
#include <linux/memblock.h>
|
2022-02-28 15:46:44 +01:00
|
|
|
#include <linux/sizes.h>
|
2022-07-03 23:06:56 -05:00
|
|
|
#include <linux/printk.h>
|
|
|
|
#include <../selftests/kselftest.h>
|
2022-02-28 15:46:44 +01:00
|
|
|
|
2022-10-11 14:21:20 +08:00
|
|
|
/* Size of the simulated memory that setup_memblock() registers with memblock. */
#define MEM_SIZE SZ_32K
/* Size of the simulated physical memory backing (see dummy_physical_memory_init()). */
#define PHYS_MEM_SIZE SZ_16M
/* Number of simulated NUMA nodes used by the NUMA allocation tests. */
#define NUMA_NODES 8

/* Initial capacity of memblock's static region arrays in the test build. */
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
|
|
|
|
|
2022-08-27 00:42:47 -05:00
|
|
|
/* Flags that adjust how the allocation tests exercise memblock. */
enum test_flags {
	/* Nothing special requested; plain, zeroed allocations. */
	TEST_F_NONE = 0x0,
	/* Request raw allocations (the returned memory is not zeroed). */
	TEST_F_RAW = 0x1,
	/* Require the allocation to come from the exact node requested. */
	TEST_F_EXACT = 0x2
};
|
|
|
|
|
2022-07-03 23:06:56 -05:00
|
|
|
/**
 * ASSERT_EQ():
 * Check the condition
 * @_expected == @_seen
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * NOTE: both arguments are evaluated twice (once in the check, once in the
 * assert), so avoid passing expressions with side effects.
 */
#define ASSERT_EQ(_expected, _seen) do { \
	if ((_expected) != (_seen)) \
		test_fail(); \
	assert((_expected) == (_seen)); \
} while (0)
|
|
|
|
|
2024-05-07 07:58:30 +00:00
|
|
|
/* Boolean shorthands built on ASSERT_EQ(). */
#define ASSERT_TRUE(_seen) ASSERT_EQ(true, _seen)
#define ASSERT_FALSE(_seen) ASSERT_EQ(false, _seen)
|
|
|
|
|
2022-07-03 23:06:56 -05:00
|
|
|
/**
 * ASSERT_NE():
 * Check the condition
 * @_expected != @_seen
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * NOTE: both arguments are evaluated twice; avoid side-effecting expressions.
 */
#define ASSERT_NE(_expected, _seen) do { \
	if ((_expected) == (_seen)) \
		test_fail(); \
	assert((_expected) != (_seen)); \
} while (0)
|
|
|
|
|
|
|
|
/**
 * ASSERT_LT():
 * Check the condition
 * @_expected < @_seen
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * NOTE: both arguments are evaluated twice; avoid side-effecting expressions.
 */
#define ASSERT_LT(_expected, _seen) do { \
	if ((_expected) >= (_seen)) \
		test_fail(); \
	assert((_expected) < (_seen)); \
} while (0)
|
|
|
|
|
memblock tests: add top-down NUMA tests for memblock_alloc_try_nid*
Add tests for memblock_alloc_try_nid() and memblock_alloc_try_nid_raw()
where the simulated physical memory is set up with multiple NUMA nodes.
Additionally, all of these tests set nid != NUMA_NO_NODE. These tests are
run with a top-down allocation direction.
The tested scenarios are:
Range unrestricted:
- region can be allocated in the specific node requested:
+ there are no previously reserved regions
+ the requested node is partially reserved but has enough space
- the specific node requested cannot accommodate the request, but the
region can be allocated in a different node:
+ there are no previously reserved regions, but node is too small
+ the requested node is fully reserved
+ the requested node is partially reserved and does not have
enough space
Range restricted:
- region can be allocated in the specific node requested after dropping
min_addr:
+ range partially overlaps with two different nodes, where the first
node is the requested node
+ range partially overlaps with two different nodes, where the
requested node ends before min_addr
- region cannot be allocated in the specific node requested, but it can be
allocated in the requested range:
+ range overlaps with multiple nodes along node boundaries, and the
requested node ends before min_addr
+ range overlaps with multiple nodes along node boundaries, and the
requested node starts after max_addr
- region cannot be allocated in the specific node requested, but it can be
allocated after dropping min_addr:
+ range partially overlaps with two different nodes, where the
second node is the requested node
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Rebecca Mckeever <remckee0@gmail.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/84009c5b3969337ccf89df850db56d364f8c228b.1663046060.git.remckee0@gmail.com
2022-09-13 00:21:10 -05:00
|
|
|
/**
 * ASSERT_LE():
 * Check the condition
 * @_expected <= @_seen
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * NOTE: both arguments are evaluated twice; avoid side-effecting expressions.
 */
#define ASSERT_LE(_expected, _seen) do { \
	if ((_expected) > (_seen)) \
		test_fail(); \
	assert((_expected) <= (_seen)); \
} while (0)
|
|
|
|
|
2022-08-27 00:42:43 -05:00
|
|
|
/**
 * ASSERT_MEM_EQ():
 * Check that the first @_size bytes of @_seen are all equal to @_expected.
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * @_seen is read byte-by-byte through a (char *) view.
 */
#define ASSERT_MEM_EQ(_seen, _expected, _size) do { \
	for (int _i = 0; _i < (_size); _i++) { \
		ASSERT_EQ(((char *)_seen)[_i], (_expected)); \
	} \
} while (0)
|
|
|
|
|
2022-08-27 00:42:47 -05:00
|
|
|
/**
 * ASSERT_MEM_NE():
 * Check that none of the first @_size bytes of @_seen are equal to @_expected.
 *
 * If false, print failed test message (if running with --verbose) and then
 * assert.
 *
 * @_seen is read byte-by-byte through a (char *) view.
 */
#define ASSERT_MEM_NE(_seen, _expected, _size) do { \
	for (int _i = 0; _i < (_size); _i++) { \
		ASSERT_NE(((char *)_seen)[_i], (_expected)); \
	} \
} while (0)
|
|
|
|
|
2022-07-03 23:06:56 -05:00
|
|
|
/* Push the current function's name onto the test-output prefix stack. */
#define PREFIX_PUSH() prefix_push(__func__)
|
|
|
|
|
2022-02-28 15:46:44 +01:00
|
|
|
/*
 * Available memory registered with memblock needs to be valid for allocs
 * test to run. This is a convenience wrapper for memory allocated in
 * dummy_physical_memory_init() that is later registered with memblock
 * in setup_memblock().
 */
struct test_memory {
	/* Start of the backing allocation for the simulated memory. */
	void *base;
};
|
2022-02-02 12:03:10 +01:00
|
|
|
|
|
|
|
/*
 * A contiguous address range; used by tests to describe the base/size
 * pairs expected in memblock's region arrays.
 */
struct region {
	phys_addr_t base;
	phys_addr_t size;
};
|
|
|
|
|
memblock tests: add top-down NUMA tests for memblock_alloc_try_nid*
Add tests for memblock_alloc_try_nid() and memblock_alloc_try_nid_raw()
where the simulated physical memory is set up with multiple NUMA nodes.
Additionally, all of these tests set nid != NUMA_NO_NODE. These tests are
run with a top-down allocation direction.
The tested scenarios are:
Range unrestricted:
- region can be allocated in the specific node requested:
+ there are no previously reserved regions
+ the requested node is partially reserved but has enough space
- the specific node requested cannot accommodate the request, but the
region can be allocated in a different node:
+ there are no previously reserved regions, but node is too small
+ the requested node is fully reserved
+ the requested node is partially reserved and does not have
enough space
Range restricted:
- region can be allocated in the specific node requested after dropping
min_addr:
+ range partially overlaps with two different nodes, where the first
node is the requested node
+ range partially overlaps with two different nodes, where the
requested node ends before min_addr
- region cannot be allocated in the specific node requested, but it can be
allocated in the requested range:
+ range overlaps with multiple nodes along node boundaries, and the
requested node ends before min_addr
+ range overlaps with multiple nodes along node boundaries, and the
requested node starts after max_addr
- region cannot be allocated in the specific node requested, but it can be
allocated after dropping min_addr:
+ range partially overlaps with two different nodes, where the
second node is the requested node
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Rebecca Mckeever <remckee0@gmail.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/84009c5b3969337ccf89df850db56d364f8c228b.1663046060.git.remckee0@gmail.com
2022-09-13 00:21:10 -05:00
|
|
|
/*
 * Return the first address past the end of @rgn (i.e. base + size).
 * Convenience accessor for comparing regions against expected ranges.
 */
static inline phys_addr_t __maybe_unused region_end(struct memblock_region *rgn)
{
	phys_addr_t end = rgn->base + rgn->size;

	return end;
}
|
|
|
|
|
2022-02-28 15:46:43 +01:00
|
|
|
/* Remove all memory and reserved regions registered with memblock. */
void reset_memblock_regions(void);
/* Restore memblock's global attributes to their default values. */
void reset_memblock_attributes(void);
/* Register the simulated memory (MEM_SIZE) with memblock. */
void setup_memblock(void);
/*
 * Like setup_memblock(), but divide the simulated memory between
 * NUMA_NODES nodes; @node_fracs describes each node's share.
 * NOTE(review): exact fraction semantics are defined in the .c file.
 */
void setup_numa_memblock(const unsigned int node_fracs[]);
/* Allocate the backing buffer used as simulated physical memory. */
void dummy_physical_memory_init(void);
/* Release the buffer allocated by dummy_physical_memory_init(). */
void dummy_physical_memory_cleanup(void);
/* Base address of the simulated physical memory. */
phys_addr_t dummy_physical_memory_base(void);
/* Parse the test binary's command-line options (e.g. --verbose). */
void parse_args(int argc, char **argv);

/* Test result reporting and verbose output helpers. */
void test_fail(void);
void test_pass(void);
void test_print(const char *fmt, ...);
/* Manage the hierarchical prefix used to label test output. */
void prefix_reset(void);
void prefix_push(const char *prefix);
void prefix_pop(void);
|
|
|
|
|
|
|
|
/* Report the current test as passed and drop its prefix from the output path. */
static inline void test_pass_pop(void)
{
	test_pass();
	prefix_pop();
}
|
|
|
|
|
2022-08-27 00:42:45 -05:00
|
|
|
/*
 * Run @func with memblock configured for top-down allocations,
 * labelling its output with a "top-down" prefix.
 */
static inline void run_top_down(int (*func)())
{
	memblock_set_bottom_up(false);
	prefix_push("top-down");
	func();
	prefix_pop();
}
|
|
|
|
|
|
|
|
/*
 * Run @func with memblock configured for bottom-up allocations,
 * labelling its output with a "bottom-up" prefix.
 */
static inline void run_bottom_up(int (*func)())
{
	memblock_set_bottom_up(true);
	prefix_push("bottom-up");
	func();
	prefix_pop();
}
|
|
|
|
|
2022-08-27 00:42:47 -05:00
|
|
|
/*
 * Verify the content of an allocated buffer against the test flags:
 * raw allocations (TEST_F_RAW) must contain no zero bytes, while
 * normal allocations must be fully zero-initialized.
 */
static inline void assert_mem_content(void *mem, int size, int flags)
{
	if (flags & TEST_F_RAW)
		ASSERT_MEM_NE(mem, 0, size);
	else
		ASSERT_MEM_EQ(mem, 0, size);
}
|
|
|
|
|
2022-02-02 12:03:10 +01:00
|
|
|
#endif
|