Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-06 05:06:29 +00:00)
mm, slub: stop disabling irqs around get_partial()
The function get_partial() does not need to have irqs disabled as a whole.
It's sufficient to convert spin_lock operations to their irq
saving/restoring versions.

As a result, it's now possible to reach the page allocator from the slab
allocator without disabling and re-enabling interrupts on the way.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
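As a schematic sketch of the conversion described above (not part of the patch; demo_lock and demo_critical_section are invented for illustration), the _irqsave/_irqrestore spinlock variants save the caller's interrupt state, disable interrupts only for the critical section, and restore the saved state on unlock, so the enclosing function no longer needs to run with irqs disabled:

#include <linux/spinlock.h>

/* Hypothetical lock that is also taken from interrupt context. */
static DEFINE_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
        unsigned long flags;

        /*
         * Before the conversion this would be spin_lock(&demo_lock),
         * which is only safe if every caller already runs with irqs
         * disabled; otherwise an interrupt handler taking the same
         * lock on this CPU could deadlock against us.
         */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch data shared with interrupt context ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

This is the transformation the patch applies to n->list_lock in get_partial_node() below, which in turn lets ___slab_alloc() call get_partial() with irqs enabled.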
commit 4b1f449ded
parent 9f101ee894
 mm/slub.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
@@ -2010,11 +2010,12 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-                              struct page **ret_page, gfp_t flags)
+                              struct page **ret_page, gfp_t gfpflags)
 {
         struct page *page, *page2;
         void *object = NULL;
         unsigned int available = 0;
+        unsigned long flags;
         int objects;
 
         /*
@@ -2026,11 +2027,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
         if (!n || !n->nr_partial)
                 return NULL;
 
-        spin_lock(&n->list_lock);
+        spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                 void *t;
 
-                if (!pfmemalloc_match(page, flags))
+                if (!pfmemalloc_match(page, gfpflags))
                         continue;
 
                 t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2051,7 +2052,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                         break;
 
         }
-        spin_unlock(&n->list_lock);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         return object;
 }
 
@@ -2779,8 +2780,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                         local_irq_restore(flags);
                         goto reread_page;
                 }
-                if (unlikely(!slub_percpu_partial(c)))
+                if (unlikely(!slub_percpu_partial(c))) {
+                        local_irq_restore(flags);
                         goto new_objects; /* stolen by an IRQ handler */
+                }
 
                 page = c->page = slub_percpu_partial(c);
                 slub_set_percpu_partial(c, page);
@@ -2789,18 +2792,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 goto redo;
         }
 
-        local_irq_save(flags);
-        if (unlikely(c->page)) {
-                local_irq_restore(flags);
-                goto reread_page;
-        }
-
 new_objects:
 
-        lockdep_assert_irqs_disabled();
-
         freelist = get_partial(s, gfpflags, node, &page);
-        local_irq_restore(flags);
         if (freelist)
                 goto check_new_page;
 