mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
slub: Pass kmem_cache struct to lock and freeze slab
We need more information about the slab for the cmpxchg implementation.

Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
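In practice the patch threads the struct kmem_cache pointer down the partial-list path, get_partial() -> get_partial_node() -> lock_and_freeze_slab(), so the freeze step can consult per-cache state once a cmpxchg-based implementation replaces the page lock. A minimal user-space sketch of that pattern follows; the cache_stub/page_stub structures, the frozen_bit field, and the freeze_slab_cmpxchg() helper are hypothetical illustrations, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct page_stub {
	_Atomic unsigned long counters;   /* packed state word for the slab */
};

struct cache_stub {
	unsigned long frozen_bit;         /* per-cache info the freeze step needs */
};

/*
 * Freeze a slab with one compare-and-exchange instead of taking a page
 * lock.  Deriving the new counters word from per-cache data is why the
 * cache pointer must now reach this depth of the call chain.
 */
static bool freeze_slab_cmpxchg(struct cache_stub *s, struct page_stub *page)
{
	unsigned long old = atomic_load(&page->counters);
	unsigned long new = old | s->frozen_bit;

	return atomic_compare_exchange_strong(&page->counters, &old, new);
}

int main(void)
{
	struct cache_stub s = { .frozen_bit = 1UL };
	struct page_stub page = { .counters = 4 };

	printf("frozen: %d, counters: %lu\n",
	       freeze_slab_cmpxchg(&s, &page),
	       atomic_load(&page.counters));
	return 0;
}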
commit 61728d1efc
parent 5cc6eee8a8

 mm/slub.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
@@ -1457,8 +1457,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
-					struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+		struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		remove_partial(n, page);
@@ -1470,7 +1470,8 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+					struct kmem_cache_node *n)
 {
 	struct page *page;
 
@@ -1485,7 +1486,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
+		if (lock_and_freeze_slab(s, n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1536,7 +1537,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(n);
+			page = get_partial_node(s, n);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1556,7 +1557,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode));
 	if (page || node != NUMA_NO_NODE)
 		return page;
 
@@ -2081,7 +2082,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
 
 	local_irq_save(flags);
 	slab_lock(page);
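The final hunk also swaps the plain declaration of flags for the kernel's uninitialized_var() annotation. In the compiler-gcc.h headers of this era the macro was essentially the self-assignment below: it convinces gcc that the variable is initialized, suppressing a spurious "may be used uninitialized" warning, while generating no extra code.

/* Historical definition from the kernel's compiler-gcc.h of this era. */
#define uninitialized_var(x) x = x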