mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm/sparse.c: do not waste pre allocated memmap space
Vincent noticed [1] that something unusual is going on with the memmap
allocations on his platform:
: I noticed this because on my ARM64 platform, with 1 GiB of memory the
: first [and only] section is allocated from the zeroing path while with
: 2 GiB of memory the first 1 GiB section is allocated from the
: non-zeroing path.
The underlying problem is that although sparse_buffer_init allocates
enough memory for all sections on the node, sparse_buffer_alloc is not
able to consume it due to a mismatch in the expected allocation
alignment.  While the sparse_buffer_init preallocation uses PAGE_SIZE
alignment, the real memmap has to be aligned to section_map_size().
This results in a wasted initial chunk of the preallocated memmap and
an unnecessary fallback allocation for a section.
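To see the mismatch in isolation, here is a minimal userspace C sketch
(not the kernel code; the 4 KiB page size and 16 MiB per-section memmap
size are illustrative assumptions).  The preallocated buffer only has
PAGE_SIZE alignment, so rounding the cursor up to the section map size
overshoots the end of the buffer unless the allocation happens to be
section aligned already:

  /* Minimal userspace sketch; PAGE_SZ and SECTION_MAP_SZ are assumed. */
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define PAGE_SZ        4096UL        /* assumed page size */
  #define SECTION_MAP_SZ (16UL << 20)  /* assumed memmap size per section */

  int main(void)
  {
          /* sparse_buffer_init side: one section worth, PAGE_SZ aligned */
          uintptr_t buf = (uintptr_t)aligned_alloc(PAGE_SZ, SECTION_MAP_SZ);
          uintptr_t buf_end = buf + SECTION_MAP_SZ;

          if (!buf)
                  return 1;

          /* sparse_buffer_alloc side: round cursor up to the request size */
          uintptr_t ptr = (buf + SECTION_MAP_SZ - 1) & ~(SECTION_MAP_SZ - 1);

          if (ptr + SECTION_MAP_SZ > buf_end)
                  /* preallocation is wasted, a fallback allocation hits */
                  printf("fallback, %lu MiB preallocation wasted\n",
                         (unsigned long)(SECTION_MAP_SZ >> 20));
          else
                  printf("consumed from the preallocated buffer\n");
          return 0;
  }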
While we are at it, also change __populate_section_memmap to align to
the requested size, because at least VMEMMAP requires the memmap to be
properly aligned.
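To make the VMEMMAP alignment constraint concrete, a worked example
with assumed x86_64-style constants (4 KiB pages, 64 B struct page,
128 MiB sections, 2 MiB PMDs; none of these values come from this
patch): one section's memmap is exactly one PMD worth of memory, so it
can be mapped by a single PMD only when the allocation itself is size
(i.e. PMD) aligned:

  /* Worked example with assumed constants, not from kernel headers. */
  #include <stdio.h>

  #define SECTION_SIZE   (128UL << 20) /* 2^27 bytes, assumed section size */
  #define PAGE_SZ        4096UL        /* assumed page size */
  #define STRUCT_PAGE_SZ 64UL          /* assumed sizeof(struct page) */
  #define PMD_SZ         (2UL << 20)   /* assumed PMD mapping size */

  int main(void)
  {
          unsigned long memmap_sz = SECTION_SIZE / PAGE_SZ * STRUCT_PAGE_SZ;

          /* 128 MiB / 4 KiB pages * 64 B per struct page = 2 MiB = one PMD */
          printf("memmap per section: %lu KiB, %s one PMD\n",
                 memmap_sz >> 10, memmap_sz == PMD_SZ ? "exactly" : "not");
          return 0;
  }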
[1] http://lkml.kernel.org/r/20191030131122.8256-1-vincent.whitchurch@axis.com
[akpm@linux-foundation.org: tweak layout, per David]
Link: http://lkml.kernel.org/r/20191119092642.31799-1-mhocko@kernel.org
Fixes: 35fd1eb1e8 ("mm/sparse: abstract sparse buffer allocations")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Debugged-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Oscar Salvador <OSalvador@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 030eab4f9f
commit 09dbcf422e

 mm/sparse.c | 14 ++++++++------
@@ -458,8 +458,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 	if (map)
 		return map;
 
-	map = memblock_alloc_try_nid_raw(size,
-					  PAGE_SIZE, addr,
+	map = memblock_alloc_try_nid_raw(size, size, addr,
 					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	if (!map)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
@@ -482,10 +481,13 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 {
 	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
-	sparsemap_buf =
-		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
-						addr,
-						MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	/*
+	 * Pre-allocated buffer is mainly used by __populate_section_memmap
+	 * and we want it to be properly aligned to the section size - this is
+	 * especially the case for VMEMMAP which maps memmap to PMDs
+	 */
+	sparsemap_buf = memblock_alloc_try_nid_raw(size, section_map_size(),
+					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	sparsemap_buf_end = sparsemap_buf + size;
 }