mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
mm, slab: check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order
kmalloc cannot allocate memory from HIGHMEM. Allocating large amounts of memory currently bypasses the check and will simply leak the memory when page_address() returns NULL. To fix this, factor the GFP_SLAB_BUG_MASK check out of slab & slub, and call it from kmalloc_order() as well. In order to make the code clear, the warning message is put in one place.

Signed-off-by: Long Li <lonuxli.64@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200704035027.GA62481@lilong
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
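For context, here is a minimal out-of-tree sketch of the pre-patch large-allocation path. The flow follows kmalloc_order() in mm/slab_common.c, but the function name and comments below are illustrative, not the verbatim kernel code: with __GFP_HIGHMEM in the flags, alloc_pages() may return a page that has no kernel mapping, page_address() then returns NULL, and the caller has no handle with which to free the pages.

/*
 * Illustrative sketch of the pre-patch behaviour (not part of the commit).
 * A highmem page has no permanent kernel mapping, so page_address() can
 * return NULL; the caller only sees NULL and the pages are leaked.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *kmalloc_order_prepatch_sketch(size_t size, gfp_t flags,
                                           unsigned int order)
{
        struct page *page;
        void *ret = NULL;

        /* kasan/kmemleak annotations that use @size are omitted here */
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);       /* honours __GFP_HIGHMEM */
        if (page)
                ret = page_address(page);       /* NULL for an unmapped page */

        return ret;     /* returning NULL here leaks 2^order pages */
}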
parent dabc3e291d
commit 444050990d
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2589,13 +2589,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
         * Be lazy and only check for valid flags here, keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-               gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-               flags &= ~GFP_SLAB_BUG_MASK;
-               pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-                               invalid_mask, &invalid_mask, flags, &flags);
-               dump_stack();
-       }
+       if (unlikely(flags & GFP_SLAB_BUG_MASK))
+               flags = kmalloc_fix_flags(flags);
+
        WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -152,6 +152,7 @@ void create_kmalloc_caches(slab_flags_t);
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif

+gfp_t kmalloc_fix_flags(gfp_t flags);

 /* Functions provided by the slab allocators */
 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,6 +26,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>

+#include "internal.h"
+
 #include "slab.h"

 enum slab_state slab_state;
@@ -1332,6 +1334,18 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 }
 #endif /* !CONFIG_SLOB */

+gfp_t kmalloc_fix_flags(gfp_t flags)
+{
+       gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
+
+       flags &= ~GFP_SLAB_BUG_MASK;
+       pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+                       invalid_mask, &invalid_mask, flags, &flags);
+       dump_stack();
+
+       return flags;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -1342,6 +1356,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        void *ret = NULL;
        struct page *page;

+       if (unlikely(flags & GFP_SLAB_BUG_MASK))
+               flags = kmalloc_fix_flags(flags);
+
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        if (likely(page)) {
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1745,13 +1745,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)

 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-       if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-               gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-               flags &= ~GFP_SLAB_BUG_MASK;
-               pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-                               invalid_mask, &invalid_mask, flags, &flags);
-               dump_stack();
-       }
+       if (unlikely(flags & GFP_SLAB_BUG_MASK))
+               flags = kmalloc_fix_flags(flags);

        return allocate_slab(s,
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
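As a usage illustration (hypothetical caller, not part of the commit): with SLUB, a kmalloc() request larger than KMALLOC_MAX_CACHE_SIZE is handed to kmalloc_order(), so a call like the one below now reaches the shared kmalloc_fix_flags() check, emits the "Unexpected gfp" warning, and continues with the offending bits cleared instead of leaking the pages.

#include <linux/slab.h>
#include <linux/sizes.h>

/* Hypothetical caller, for illustration only. */
static void *example_bad_large_alloc(void)
{
        /*
         * 1 MiB exceeds KMALLOC_MAX_CACHE_SIZE on SLUB, so this request is
         * passed through to kmalloc_order(). __GFP_HIGHMEM is part of
         * GFP_SLAB_BUG_MASK; after this patch it is stripped (with a
         * warning) rather than silently leaking the allocated pages.
         */
        return kmalloc(SZ_1M, GFP_KERNEL | __GFP_HIGHMEM);
}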