// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

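/*
 * Temporary page-sized root table: kasan_init_shadow() below points TTBR1
 * at a copy of swapper_pg_dir held here, so that instrumented code can
 * keep running against the early shadow while the real shadow is set up.
 */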
static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

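/*
 * Allocate one zeroed page for the shadow from memblock, above the DMA
 * zone and without kmemleak tracking (MEMBLOCK_ALLOC_NOLEAKTRACE). There
 * is no way to recover if this fails, so panic.
 */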
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

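/*
 * Like kasan_alloc_zeroed_page(), but without zeroing: the caller
 * (kasan_pte_populate()) fills the page with KASAN_SHADOW_INIT instead.
 */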
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_NOLEAKTRACE,
					     node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

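/*
 * The kasan_*_offset() helpers below each walk one level of the shadow
 * page tables, first populating an empty entry with a next-level table.
 * In the 'early' case that table is the statically allocated
 * kasan_early_shadow_* one, and the returned pointer is computed via the
 * kernel image mapping (*_offset_kimg()) since the linear map is not up
 * yet.
 */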
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
			__pa_symbol(kasan_early_shadow_pte)
			: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
			__pa_symbol(kasan_early_shadow_pmd)
			: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
			__pa_symbol(kasan_early_shadow_pud)
			: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t p4d_phys = early ?
			__pa_symbol(kasan_early_shadow_p4d)
			: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, p4d_phys, PGD_TYPE_TABLE);
	}

	return early ? p4d_offset_kimg(pgdp, addr) : p4d_offset(pgdp, addr);
}

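/*
 * Map shadow pages for [addr, end). Early on, every PTE points at the
 * shared kasan_early_shadow_page; later, each PTE gets a private page
 * initialized to KASAN_SHADOW_INIT. Like the other kasan_*_populate()
 * walkers below, the loop also stops early when it reaches an entry
 * that is already populated.
 */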
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
			__pa_symbol(kasan_early_shadow_page)
			: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		__set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && p4d_none(READ_ONCE(*p4dp)));
}

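/*
 * Populate the shadow for [addr, end), walking all levels from the root
 * of swapper_pg_dir; with 'early' set, everything maps to the statically
 * allocated early shadow tables and zero page.
 */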
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

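/*
 * Minimum alignment required of KASAN_SHADOW_START/END, checked by the
 * BUILD_BUG_ON()s in kasan_early_init() below.
 */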
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS > 4
#define SHADOW_ALIGN	P4D_SIZE
#else
#define SHADOW_ALIGN	PUD_SIZE
#endif

/*
 * Return whether 'addr' is aligned to the size covered by a root level
 * descriptor.
 */
static bool __init root_level_aligned(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);

	return (addr % (PAGE_SIZE << shift)) == 0;
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, SHADOW_ALIGN));

	if (!root_level_aligned(KASAN_SHADOW_START)) {
		/*
		 * The start address is misaligned, and so the next level table
		 * will be shared with the linear region. This can happen with
		 * 4 or 5 level paging, so install a generic pte_t[] as the
		 * next level. This prevents the kasan_pgd_populate call below
		 * from inserting an entry that refers to the shared KASAN zero
		 * shadow pud_t[]/p4d_t[], which could end up getting corrupted
		 * when the linear region is mapped.
		 */
		static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss;
		pgd_t *pgdp = pgd_offset_k(KASAN_SHADOW_START);

		set_pgd(pgdp, __pgd(__pa_symbol(tbl) | PGD_TYPE_TABLE));
	}

	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Return the descriptor index of 'addr' in the root level table
 */
static int __init root_level_idx(u64 addr)
{
	/*
	 * On 64k pages, the TTBR1 range root tables are extended for 52-bit
	 * virtual addressing, and TTBR1 will simply point to the pgd_t entry
	 * that covers the start of the 48-bit addressable VA space if LVA is
	 * not implemented. This means we need to index the table as usual,
	 * instead of masking off bits based on vabits_actual.
	 */
	u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
							: vabits_actual;
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);

	return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
}

/*
 * Clone a next level table from swapper_pg_dir into tmp_pg_dir
 */
static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
{
	int idx = root_level_idx(addr);
	pgd_t pgd = READ_ONCE(swapper_pg_dir[idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memcpy(pud, pudp, PAGE_SIZE);
	tmp_pg_dir[idx] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) |
				PUD_TYPE_TABLE);
}

/*
 * Return the descriptor index of 'addr' in the next level table
 */
static int __init next_level_idx(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);

	return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
}

/*
 * Dereference the table descriptor at 'pgd_idx' and clear the entries from
 * 'start' to 'end' (exclusive) from the table.
 */
static void __init clear_next_level(int pgd_idx, int start, int end)
{
	pgd_t pgd = READ_ONCE(swapper_pg_dir[pgd_idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memset(&pudp[start], 0, (end - start) * sizeof(pud_t));
}

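/*
 * Unmap the early shadow for [start, end) from swapper_pg_dir: clear the
 * covered parts of the next-level tables at either misaligned end, then
 * wipe the root level descriptors in between.
 */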
static void __init clear_shadow(u64 start, u64 end)
{
	int l = root_level_idx(start), m = root_level_idx(end);

	if (!root_level_aligned(start))
		clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE);
	if (!root_level_aligned(end))
		clear_next_level(m, 0, next_level_idx(end));
	memset(&swapper_pg_dir[l], 0, (m - l) * sizeof(pgd_t));
}

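/*
 * Replace the early shadow (a single zero page mapped over the whole
 * shadow region) with properly allocated shadow memory. Throughout, one
 * byte of shadow covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory
 * (8 for generic KASAN, 16 for sw-tags), i.e.
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * which is what kasan_mem_to_shadow() computes.
 */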
static void __init kasan_init_shadow(void)
{
	static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First, the early shadow must be unmapped (the clear_shadow() call
	 * below). However, instrumented code cannot execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));

	/*
	 * If the start or end address of the shadow region is not aligned to
	 * the root level size, we have to allocate a temporary next-level table
	 * in each case, clone the next level of descriptors, and install the
	 * table into tmp_pg_dir. Note that with 5 levels of paging, the next
	 * level will in fact be p4d_t, but that makes no difference in this
	 * case.
	 */
	if (!root_level_aligned(KASAN_SHADOW_START))
		clone_next_level(KASAN_SHADOW_START, tmp_pg_dir, pud[0]);
	if (!root_level_aligned(KASAN_SHADOW_END))
		clone_next_level(KASAN_SHADOW_END, tmp_pg_dir, pud[1]);
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	BUILD_BUG_ON(VMALLOC_START != MODULES_END);
	kasan_populate_early_shadow((void *)vmalloc_shadow_end,
				    (void *)KASAN_SHADOW_END);

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte(&kasan_early_shadow_pte[i],
			  pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				  PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

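/*
 * init_task starts out with a nonzero kasan_depth so that reports are
 * suppressed while only the early shadow exists; clearing it here
 * enables error reporting.
 */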
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
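/*
 * Eagerly map real shadow for a vm area registered before the vmalloc
 * shadow machinery is available (via vm_area_register_early()). Other
 * address ranges already had their shadow set up in kasan_init_shadow()
 * and are ignored.
 */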
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

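/*
 * Main entry point, called from setup_arch(): switch from the early
 * shadow to the real one and enable reporting.
 */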
void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/*
	 * Generic KASAN is now fully initialized.
	 * Software and Hardware Tag-Based modes still require
	 * kasan_init_sw_tags() and kasan_init_hw_tags() respectively.
	 */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */