mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-19 12:00:00 +00:00
mm, slab: remove the corner case of inc_slabs_node()
We already call inc_slabs_node() after kmem_cache_node->node[node] has been initialized in early_kmem_cache_node_alloc(), so this special case in inc_slabs_node() can be removed. Then we no longer need to consider the existence of the kmem_cache_node structure in inc_slabs_node().

Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
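The reasoning hinges on the ordering inside early_kmem_cache_node_alloc(): by the time the remaining inc_slabs_node() call runs, the node structure has already been installed in kmem_cache_node->node[node], so get_node() inside inc_slabs_node() can no longer return NULL during bootstrap. A much-abbreviated sketch of that ordering (field and helper names follow mm/slub.c, but the body is simplified and is not the exact upstream source):

static void early_kmem_cache_node_alloc(int node)
{
	struct slab *slab;
	struct kmem_cache_node *n;

	/* Allocate the slab that will hold the kmem_cache_node itself. */
	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
	BUG_ON(!slab);

	/* Carve the node structure out of that slab (freelist/inuse bookkeeping elided). */
	n = slab->freelist;

	/* Publish the node structure before touching its counters. */
	kmem_cache_node->node[node] = n;
	init_kmem_cache_node(n);

	/*
	 * Only now are nr_slabs/total_objects bumped, so get_node() in
	 * inc_slabs_node() is guaranteed to see a non-NULL node here,
	 * which is why the old "if (likely(n))" guard can go away.
	 */
	inc_slabs_node(kmem_cache_node, node, slab->objects);
}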
commit 3dd549a557
parent 011568eb31
mm/slub.c | 13
@@ -1498,16 +1498,8 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (likely(n)) {
-		atomic_long_inc(&n->nr_slabs);
-		atomic_long_add(objects, &n->total_objects);
-	}
+	atomic_long_inc(&n->nr_slabs);
+	atomic_long_add(objects, &n->total_objects);
 }
 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
@@ -4855,7 +4847,6 @@ static void early_kmem_cache_node_alloc(int node)
 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
 
 	BUG_ON(!slab);
-	inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
 	if (slab_nid(slab) != node) {
 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
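For reference, with the bootstrap special case gone, inc_slabs_node() reduces to the following (assembled from the added lines of the first hunk above):

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}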