memblock: implement for_each_reserved_mem_region() using __next_mem_region()
Iteration over memblock.reserved with for_each_reserved_mem_region() used
__next_reserved_mem_region(), which implemented a subset of
__next_mem_region(). Use __for_each_mem_range() and, essentially,
__next_mem_region() with appropriate parameters to reduce the code
duplication.

While at it, rename for_each_reserved_mem_region() to
for_each_reserved_mem_range() for consistency.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>	[.clang-format]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-17-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9f3d5eaa3c
parent 5bd0960b85
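Before the diff: with this patch applied, walking reserved memory looks like the minimal sketch below. The helper name range_fully_reserved() is hypothetical; the body is modeled on the gic_check_reserved_range() hunk further down.

	#include <linux/memblock.h>

	/*
	 * Hypothetical example, not part of the patch: check whether
	 * [addr, addr + size - 1] lies entirely inside one reserved region.
	 */
	static bool __init range_fully_reserved(phys_addr_t addr,
						phys_addr_t size)
	{
		phys_addr_t start, end;
		phys_addr_t addr_end = addr + size - 1;
		u64 i;

		for_each_reserved_mem_range(i, &start, &end) {
			/* start/end describe one reserved region */
			if (addr >= start && addr_end <= end)
				return true;
		}
		return false;
	}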
--- a/.clang-format
+++ b/.clang-format
@@ -273,7 +273,7 @@ ForEachMacros:
   - 'for_each_registered_fb'
   - 'for_each_requested_gpio'
   - 'for_each_requested_gpio_in_range'
-  - 'for_each_reserved_mem_region'
+  - 'for_each_reserved_mem_range'
   - 'for_each_rtd_codec_dais'
   - 'for_each_rtd_codec_dais_rollback'
   - 'for_each_rtd_components'
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -257,7 +257,7 @@ static int __init reserve_memblock_reserved_regions(void)
 		if (!memblock_is_region_reserved(mem->start, mem_size))
 			continue;
 
-		for_each_reserved_mem_region(j, &r_start, &r_end) {
+		for_each_reserved_mem_range(j, &r_start, &r_end) {
 			resource_size_t start, end;
 
 			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2198,7 +2198,7 @@ static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
 
 	addr_end = addr + size - 1;
 
-	for_each_reserved_mem_region(i, &start, &end) {
+	for_each_reserved_mem_range(i, &start, &end) {
 		if (addr >= start && addr_end <= end)
 			return true;
 	}
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -132,9 +132,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
 			  struct memblock_type *type_b, phys_addr_t *out_start,
 			  phys_addr_t *out_end, int *out_nid);
 
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
-				phys_addr_t *out_end);
-
 void __memblock_free_late(phys_addr_t base, phys_addr_t size);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -224,7 +221,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
 			 MEMBLOCK_NONE, p_start, p_end, NULL)
 
 /**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
  * @i: u64 used as loop variable
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -232,10 +229,9 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  * Walks over reserved areas of memblock. Available as soon as memblock
  * is initialized.
  */
-#define for_each_reserved_mem_region(i, p_start, p_end)			\
-	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end)			\
+	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
+			     MEMBLOCK_NONE, p_start, p_end, NULL)
 
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
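For context, __for_each_mem_range(), which the new macro delegates to, was added earlier in this series as a generic for-loop wrapper around __next_mem_range(). Roughly (reproduced from memory for reading convenience, not part of this hunk):

	#define __for_each_mem_range(i, type_a, type_b, nid, flags,	\
				     p_start, p_end, p_nid)		\
		for (i = 0, __next_mem_range(&i, nid, flags, type_a,	\
					     type_b, p_start, p_end,	\
					     p_nid);			\
		     i != (u64)ULLONG_MAX;				\
		     __next_mem_range(&i, nid, flags, type_a, type_b,	\
				      p_start, p_end, p_nid))

With type_a = &memblock.reserved, type_b = NULL, NUMA_NO_NODE and MEMBLOCK_NONE, the generic walker visits every reserved region with no filtering, which is exactly what the removed open-coded loop did by hand.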
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -132,6 +132,14 @@ struct memblock_type physmem = {
 };
 #endif
 
+/*
+ * keep a pointer to &memblock.memory in the text section to use it in
+ * __next_mem_range() and its helpers.
+ * For architectures that do not keep memblock data after init, this
+ * pointer will be reset to NULL at memblock_discard()
+ */
+static __refdata struct memblock_type *memblock_memory = &memblock.memory;
+
 #define for_each_memblock_type(i, memblock_type, rgn)			\
 	for (i = 0, rgn = &memblock_type->regions[0];			\
 	     i < memblock_type->cnt;					\
@@ -402,6 +410,8 @@ void __init memblock_discard(void)
 					  memblock.memory.max);
 		__memblock_free_late(addr, size);
 	}
+
+	memblock_memory = NULL;
 }
 #endif
 
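This reset pairs with the memblock_memory pointer introduced in the previous hunk: on architectures that discard memblock data after init, the cached pointer is cleared here, so once the regions array may have been freed, should_skip_region() (reworked below) treats no type as the memory type and never applies node or flag filtering.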
@@ -952,42 +962,16 @@ int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
 	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
 }
 
-/**
- * __next_reserved_mem_region - next function for for_each_reserved_region()
- * @idx: pointer to u64 loop variable
- * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
- * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
- *
- * Iterate over all reserved memory regions.
- */
-void __init_memblock __next_reserved_mem_region(u64 *idx,
-					   phys_addr_t *out_start,
-					   phys_addr_t *out_end)
-{
-	struct memblock_type *type = &memblock.reserved;
-
-	if (*idx < type->cnt) {
-		struct memblock_region *r = &type->regions[*idx];
-		phys_addr_t base = r->base;
-		phys_addr_t size = r->size;
-
-		if (out_start)
-			*out_start = base;
-		if (out_end)
-			*out_end = base + size - 1;
-
-		*idx += 1;
-		return;
-	}
-
-	/* signal end of iteration */
-	*idx = ULLONG_MAX;
-}
-
-static bool should_skip_region(struct memblock_region *m, int nid, int flags)
+static bool should_skip_region(struct memblock_type *type,
+			       struct memblock_region *m,
+			       int nid, int flags)
 {
 	int m_nid = memblock_get_region_node(m);
 
+	/* we never skip regions when iterating memblock.reserved or physmem */
+	if (type != memblock_memory)
+		return false;
+
 	/* only memory regions are associated with nodes, check it */
 	if (nid != NUMA_NO_NODE && nid != m_nid)
 		return true;
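This hunk is the heart of the change: should_skip_region() now receives the memblock_type being walked, and the pointer comparison against the cached memblock_memory lets it bail out immediately for memblock.reserved and physmem. Node ids and the hotplug/mirror/nomap flags are only tracked for memblock.memory, so this single early return is what makes it safe for for_each_reserved_mem_range() to reuse the generic __next_mem_range() machinery.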
@@ -1052,7 +1036,7 @@ void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
 		phys_addr_t m_end = m->base + m->size;
 		int	    m_nid = memblock_get_region_node(m);
 
-		if (should_skip_region(m, nid, flags))
+		if (should_skip_region(type_a, m, nid, flags))
 			continue;
 
 		if (!type_b) {
@@ -1156,7 +1140,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
 		phys_addr_t m_end = m->base + m->size;
 		int m_nid = memblock_get_region_node(m);
 
-		if (should_skip_region(m, nid, flags))
+		if (should_skip_region(type_a, m, nid, flags))
 			continue;
 
 		if (!type_b) {
@@ -1981,7 +1965,7 @@ static unsigned long __init free_low_memory_core_early(void)
 
 	memblock_clear_hotplug(0, -1);
 
-	for_each_reserved_mem_region(i, &start, &end)
+	for_each_reserved_mem_range(i, &start, &end)
 		reserve_bootmem_region(start, end);
 
 	/*