Merge branch 'slab/for-6.13/features' into slab/for-next

Merge the slab feature branch for 6.13:

- Add new slab_strict_numa parameter for per-object memory policies
  (Christoph Lameter)
Vlastimil Babka 2024-11-16 21:21:51 +01:00
commit 9e19aa165c
3 changed files with 62 additions and 0 deletions

Documentation/admin-guide/kernel-parameters.txt

@@ -6147,6 +6147,16 @@
			For more information see Documentation/mm/slub.rst.
			(slub_nomerge legacy name also accepted for now)

	slab_strict_numa	[MM]
			Support memory policies on a per object level
			in the slab allocator. The default is for memory
			policies to be applied at the folio level when
			a new folio is needed or a partial folio is
			retrieved from the lists. Increases overhead
			in the slab fastpaths but gains more accurate
			NUMA kernel object placement which helps with slow
			interconnects in NUMA systems.

	slram=		[HW,MTD]

	smart2=		[HW]

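For context (not part of this commit): the per-task policy that slab_strict_numa acts on is the one installed with set_mempolicy(2). The following minimal userspace sketch binds the calling task to node 0; it assumes the libnuma development headers (<numaif.h>, link with -lnuma) and a machine with at least two nodes, and omits most error handling.

#include <numaif.h>		/* set_mempolicy(), MPOL_BIND */
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 1UL << 0;	/* allow node 0 only */

	/*
	 * Restrict this task's memory policy to node 0.  With the
	 * slab_strict_numa boot parameter set, the slab allocator consults
	 * this policy for every object allocation made on behalf of the task,
	 * not only when a new slab folio is acquired.
	 */
	if (set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");

	/* ... run the NUMA-sensitive workload here ... */
	return 0;
}
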
Documentation/mm/slub.rst

@@ -175,6 +175,15 @@ can be influenced by kernel parameters:
	``slab_max_order`` to 0, what cause minimum possible order of
	slabs allocation.

``slab_strict_numa``
	Enables the application of memory policies on each
	allocation. This results in more accurate placement of
	objects which may result in the reduction of accesses
	to remote nodes. The default is to only apply memory
	policies at the folio level when a new folio is acquired
	or a folio is retrieved from the lists. Enabling this
	option reduces the fastpath performance of the slab allocator.

SLUB Debug output
=================

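In this change the switch is boot-time only: it is enabled by appending the bare token slab_strict_numa to the kernel command line (for example in the bootloader's kernel entry). The fastpath cost mentioned above is paid only when the option is actually enabled, because the per-object check added to mm/slub.c below sits behind a static branch that stays patched out by default.
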
mm/slub.c

@@ -218,6 +218,10 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
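
The strict_numa key above is a static branch (jump label), so the per-object policy check added in the next hunk is compiled as a patched-out no-op until the key is enabled at boot. A minimal sketch of that pattern, with hypothetical names and not taken from this commit:

#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);	/* defaults to off */

static int example_fastpath(int node)
{
	/* Compiled as a no-op jump until the key is switched on. */
	if (static_branch_unlikely(&example_key))
		node = 0;	/* the optional extra work */

	return node;
}

static int __init example_setup(char *str)
{
	static_branch_enable(&example_key);	/* patches the branch in */
	return 1;
}
__setup("example_key", example_setup);
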
@@ -3949,6 +3953,28 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
	object = c->freelist;
	slab = c->slab;

#ifdef CONFIG_NUMA
	if (static_branch_unlikely(&strict_numa) &&
			node == NUMA_NO_NODE) {

		struct mempolicy *mpol = current->mempolicy;

		if (mpol) {
			/*
			 * Special BIND rule support. If existing slab
			 * is in permitted set then do not redirect
			 * to a particular node.
			 * Otherwise we apply the memory policy to get
			 * the node we need to allocate on.
			 */
			if (mpol->mode != MPOL_BIND || !slab ||
					!node_isset(slab_nid(slab), mpol->nodes))
				node = mempolicy_slab_node();
		}
	}
#endif

	if (!USE_LOCKLESS_FAST_PATH() ||
	    unlikely(!object || !slab || !node_match(slab, node))) {
		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
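
To make the new branch concrete, here is a stand-alone model of just the redirect decision; the enum, struct and helper are simplified stand-ins for the kernel's mempolicy types, not actual kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in policy modes for the model only. */
enum model_mode { MODEL_MPOL_PREFERRED, MODEL_MPOL_BIND, MODEL_MPOL_INTERLEAVE };

struct model_policy {
	enum model_mode mode;
	unsigned long nodes;		/* bit n set => node n is allowed */
};

/*
 * Returns true when the allocation should be redirected to a policy-chosen
 * node, mirroring the condition above: any non-BIND policy always picks a
 * node, and BIND keeps the current cpu slab only when that slab already
 * lives on an allowed node.  slab_node < 0 models "no cpu slab".
 */
static bool redirect_to_policy_node(const struct model_policy *mpol, int slab_node)
{
	if (!mpol)
		return false;				/* no policy: leave the fast path alone */
	if (mpol->mode != MODEL_MPOL_BIND)
		return true;
	if (slab_node < 0)
		return true;				/* no cpu slab to keep */
	return !(mpol->nodes & (1UL << slab_node));	/* keep it only if its node is allowed */
}

int main(void)
{
	struct model_policy bind01 = { .mode = MODEL_MPOL_BIND, .nodes = 0x3 };	/* nodes 0 and 1 */

	printf("slab on node 0: redirect=%d\n", redirect_to_policy_node(&bind01, 0));	/* 0 */
	printf("slab on node 2: redirect=%d\n", redirect_to_policy_node(&bind01, 2));	/* 1 */
	return 0;
}

The point of the MPOL_BIND carve-out shows in the two calls: a cpu slab already sitting on an allowed node is kept, while a slab outside the bound set forces a node chosen by the policy.
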
@@ -5715,6 +5741,23 @@ static int __init setup_slub_min_objects(char *str)
__setup("slab_min_objects=", setup_slub_min_objects);
__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);

#ifdef CONFIG_NUMA
static int __init setup_slab_strict_numa(char *str)
{
	if (nr_node_ids > 1) {
		static_branch_enable(&strict_numa);
		pr_info("SLUB: Strict NUMA enabled.\n");
	} else {
		pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
	}

	return 1;
}

__setup("slab_strict_numa", setup_slab_strict_numa);
#endif
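
Two behaviours follow directly from the handler above: the parameter takes no value (note the bare "slab_strict_numa" string, unlike "slab_min_objects="), and the static key is only armed when more than one node is present, in which case "SLUB: Strict NUMA enabled." is logged; on a single-node system only the warning "slab_strict_numa parameter set on non NUMA system." is printed and the fast path is left untouched.
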
#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied