s390/kasan: move memory needs estimation into a function
Also correct rounding-down errors in the estimation calculations.

Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
commit 0c4ec024a4
parent e385b550fa
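The diff below touches the boot-time KASLR code (get_random_base()) and the s390 <asm/kasan.h> header. The "rounding-down errors" mentioned above are the plain integer divisions in the old inline estimate, which truncate both the shadow-memory size and the page-table count; the new helper rounds them up instead. As a rough illustration, here is a small user-space sketch (not kernel code) contrasting the two formulas. The PAGE_SIZE, _PAGE_ENTRIES and _PAGE_TABLE_SIZE values are assumptions standing in for the s390 definitions (4 KiB pages, 256 entries per 2 KiB page table), and ROUND_UP/DIV_ROUND_UP stand in for the kernel macros.

/* Standalone sketch (not kernel code): old truncating estimate vs. the
 * new rounded-up one. Constants below are assumed s390-like values. */
#include <stdio.h>

#define PAGE_SIZE        4096ULL
#define _PAGE_ENTRIES    256ULL
#define _PAGE_TABLE_SIZE 2048ULL

#define ROUND_UP(x, a)     ((((x) + (a) - 1) / (a)) * (a))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Old formula: every division truncates (rounds down). */
static unsigned long long estimate_old(unsigned long long physmem)
{
        unsigned long long kasan_needs = physmem / 8;

        kasan_needs += (physmem + kasan_needs) / PAGE_SIZE /
                       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
        return kasan_needs;
}

/* New formula: shadow size rounded up to a page, page-table count
 * rounded up to whole page tables, as in the patch below. */
static unsigned long long estimate_new(unsigned long long physmem)
{
        unsigned long long kasan_needs, pages;

        kasan_needs = ROUND_UP(physmem / 8, PAGE_SIZE);
        pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
        kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
        return kasan_needs;
}

int main(void)
{
        /* deliberately not page-aligned so the rounding difference shows */
        unsigned long long physmem = (4ULL << 30) + 12345;

        printf("old estimate: %llu bytes\n", estimate_old(physmem));
        printf("new estimate: %llu bytes\n", estimate_new(physmem));
        return 0;
}

For page-aligned inputs the two formulas agree; the difference only appears when the shadow size or page count is not an exact multiple, in which case the old code under-reserved slightly.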
@@ -7,6 +7,7 @@
 #include <asm/cpacf.h>
 #include <asm/timex.h>
 #include <asm/sclp.h>
+#include <asm/kasan.h>
 #include "compressed/decompressor.h"
 #include "boot.h"
 
@@ -179,34 +180,19 @@ unsigned long get_random_base(unsigned long safe_addr)
         if (memory_end_set)
                 memory_limit = min(memory_limit, memory_end);
 
+        /*
+         * Avoid putting kernel in the end of physical memory
+         * which kasan will use for shadow memory and early pgtable
+         * mapping allocations.
+         */
+        memory_limit -= kasan_estimate_memory_needs(memory_limit);
+
         if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
                 if (safe_addr < INITRD_START + INITRD_SIZE)
                         safe_addr = INITRD_START + INITRD_SIZE;
         }
         safe_addr = ALIGN(safe_addr, THREAD_SIZE);
 
-        if ((IS_ENABLED(CONFIG_KASAN))) {
-                /*
-                 * Estimate kasan memory requirements, which it will reserve
-                 * at the very end of available physical memory. To estimate
-                 * that, we take into account that kasan would require
-                 * 1/8 of available physical memory (for shadow memory) +
-                 * creating page tables for the whole memory + shadow memory
-                 * region (1 + 1/8). To keep page tables estimates simple take
-                 * the double of combined ptes size.
-                 */
-                memory_limit = get_mem_detect_end();
-                if (memory_end_set && memory_limit > memory_end)
-                        memory_limit = memory_end;
-
-                /* for shadow memory */
-                kasan_needs = memory_limit / 8;
-                /* for paging structures */
-                kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
-                               _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
-                memory_limit -= kasan_needs;
-        }
-
         kernel_size = vmlinux.image_size + vmlinux.bss_size;
         if (safe_addr + kernel_size > memory_limit)
                 return 0;
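One effect of the hunk above is the caller-side contract: memory_limit is first clamped by an explicit memory limit ("mem=") and only then has the kasan estimate subtracted, so the helper no longer re-reads the detected memory end itself. A minimal user-space sketch of that ordering, with made-up sizes and a simplified stand-in for kasan_estimate_memory_needs():

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* simplified stand-in for kasan_estimate_memory_needs(): shadow only */
static unsigned long long estimate(unsigned long long physmem)
{
        return physmem / 8;
}

int main(void)
{
        unsigned long long detected = 8ULL << 30;   /* 8 GiB detected */
        unsigned long long mem_opt  = 4ULL << 30;   /* hypothetical mem=4G */
        unsigned long long memory_limit = MIN(detected, mem_opt);

        /* reserve the kasan tail once, up front, as the new code does */
        memory_limit -= estimate(memory_limit);

        printf("space left for kernel placement: %llu MiB\n",
               memory_limit >> 20);
        return 0;
}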
@@ -17,10 +17,36 @@ extern void kasan_early_init(void);
 extern void kasan_copy_shadow_mapping(void);
 extern void kasan_free_early_identity(void);
 extern unsigned long kasan_vmax;
+
+/*
+ * Estimate kasan memory requirements, which it will reserve
+ * at the very end of available physical memory. To estimate
+ * that, we take into account that kasan would require
+ * 1/8 of available physical memory (for shadow memory) +
+ * creating page tables for the whole memory + shadow memory
+ * region (1 + 1/8). To keep page tables estimates simple take
+ * the double of combined ptes size.
+ *
+ * physmem parameter has to be already adjusted if not entire physical memory
+ * would be used (e.g. due to effect of "mem=" option).
+ */
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
+{
+        unsigned long kasan_needs;
+        unsigned long pages;
+        /* for shadow memory */
+        kasan_needs = round_up(physmem / 8, PAGE_SIZE);
+        /* for paging structures */
+        pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
+        kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
+
+        return kasan_needs;
+}
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_copy_shadow_mapping(void) { }
 static inline void kasan_free_early_identity(void) { }
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
 #endif
 
 #endif
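The !CONFIG_KASAN stub that returns 0 keeps the call site in get_random_base() free of #ifdefs: subtracting zero is simply a no-op when the feature is off. A minimal sketch of that pattern (CONFIG_KASAN_DEMO is a made-up macro standing in for CONFIG_KASAN, and the estimate is simplified):

#include <stdio.h>

#ifdef CONFIG_KASAN_DEMO
/* feature enabled: reserve roughly 1/8 of physmem (simplified) */
static inline unsigned long long estimate_memory_needs(unsigned long long physmem)
{
        return physmem / 8;
}
#else
/* feature disabled: reserve nothing, so the caller's subtraction is a no-op */
static inline unsigned long long estimate_memory_needs(unsigned long long physmem)
{
        return 0;
}
#endif

int main(void)
{
        unsigned long long memory_limit = 1ULL << 30;   /* 1 GiB example */

        memory_limit -= estimate_memory_needs(memory_limit);
        printf("memory_limit after reservation: %llu bytes\n", memory_limit);
        return 0;
}

Compiling with -DCONFIG_KASAN_DEMO reserves 1/8 of the example limit; without it the limit is untouched, mirroring how the stub behaves for kernels built without KASAN.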