slub: pass kmem_cache_cpu pointer to get_partial()
Pass the kmem_cache_cpu pointer to get_partial(). That way we can avoid the this_cpu_write() statements.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
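The rationale is easiest to see in miniature: a this_cpu_write() must re-resolve the current CPU's slot on every store, whereas a pointer the caller has already resolved can be written through directly. Below is a minimal user-space sketch of that pattern, not kernel code; cpu_slot, slot_write, populate_indirect, and populate_direct are hypothetical stand-ins for struct kmem_cache_cpu, this_cpu_write(), and the two sides of acquire_slab().

/*
 * Minimal user-space sketch (not kernel code) of the pattern this
 * commit applies; all names here are hypothetical stand-ins.
 */
#include <stdio.h>

struct cpu_slot {			/* stand-in for struct kmem_cache_cpu */
	void *freelist;
	void *page;
	int node;
};

static struct cpu_slot slots[4];	/* stand-in for the per-CPU area */
static int current_cpu;			/* stand-in for the executing CPU id */

/* Resolve the current CPU's slot on every store, as a
 * this_cpu_write()-style accessor must. */
#define slot_write(field, val)	(slots[current_cpu].field = (val))

/* Before: three stores, three per-CPU slot resolutions. */
static void populate_indirect(void *fl, void *pg, int nd)
{
	slot_write(freelist, fl);
	slot_write(page, pg);
	slot_write(node, nd);
}

/* After: the caller resolves the slot once and hands the pointer
 * down, so the callee issues plain stores through it. */
static void populate_direct(struct cpu_slot *c, void *fl, void *pg, int nd)
{
	c->freelist = fl;
	c->page = pg;
	c->node = nd;
}

int main(void)
{
	int obj;

	current_cpu = 1;
	populate_indirect(&obj, &obj, 0);
	populate_direct(&slots[current_cpu], &obj, &obj, 0);
	printf("node=%d\n", slots[current_cpu].node);
	return 0;
}

In the commit itself, __slab_alloc() already holds the kmem_cache_cpu pointer c, so threading it through get_partial() down to acquire_slab() lets the three this_cpu_write() calls become plain assignments.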
parent e6e82ea112
commit acd19fd1a7

mm/slub.c | 30
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1557,7 +1557,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * Must hold list_lock.
  */
 static inline int acquire_slab(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct page *page)
+		struct kmem_cache_node *n, struct page *page,
+		struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1586,9 +1587,9 @@ static inline int acquire_slab(struct kmem_cache *s,
 
 	if (freelist) {
 		/* Populate the per cpu freelist */
-		this_cpu_write(s->cpu_slab->freelist, freelist);
-		this_cpu_write(s->cpu_slab->page, page);
-		this_cpu_write(s->cpu_slab->node, page_to_nid(page));
+		c->freelist = freelist;
+		c->page = page;
+		c->node = page_to_nid(page);
 		return 1;
 	} else {
 		/*
@@ -1606,7 +1607,7 @@ static inline int acquire_slab(struct kmem_cache *s,
  * Try to allocate a partial slab from a specific node.
  */
 static struct page *get_partial_node(struct kmem_cache *s,
-		struct kmem_cache_node *n)
+		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
 	struct page *page;
 
@@ -1621,7 +1622,7 @@ static struct page *get_partial_node(struct kmem_cache *s,
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (acquire_slab(s, n, page))
+		if (acquire_slab(s, n, page, c))
 			goto out;
 	page = NULL;
 out:
@@ -1632,7 +1633,8 @@ static struct page *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -1672,7 +1674,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(s, n);
+			page = get_partial_node(s, n, c);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1687,16 +1689,17 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 /*
  * Get a partial page, lock it and return it.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+		struct kmem_cache_cpu *c)
 {
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(s, get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode), c);
 	if (page || node != NUMA_NO_NODE)
 		return page;
 
-	return get_any_partial(s, flags);
+	return get_any_partial(s, flags, c);
 }
 
 #ifdef CONFIG_PREEMPT
@@ -1765,9 +1768,6 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
 }
-/*
- * Remove the cpu slab
- */
 
 /*
  * Remove the cpu slab
@@ -2116,7 +2116,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return object;
 
 new_slab:
-	page = get_partial(s, gfpflags, node);
+	page = get_partial(s, gfpflags, node, c);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
 		object = c->freelist;