x86/boot/64: Avoid intentional absolute symbol references in .head.text

The code in .head.text executes from a 1:1 mapping and cannot generally
refer to global variables using their kernel virtual addresses. However,
there are some occurrences of such references that are valid: the kernel
virtual addresses of _text and _end are needed to populate the page
tables correctly, and some other section markers are used in a similar
way.

To avoid the need for making exceptions to the rule that .head.text must
not contain any absolute symbol references, derive these addresses from
the RIP-relative 1:1 mapped physical addresses, which can be safely
determined using RIP_REL_REF().
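
As an illustration of the pattern (a sketch, not the literal hunk below):
__startup_64() already receives the physical-to-virtual offset as its
p2v_offset argument, so the kernel virtual addresses of _text and _end
follow directly from their RIP-relative 1:1 mapped physical addresses:

    /* 1:1 mapped physical addresses - safe to take from .head.text */
    unsigned long pa_text = (unsigned long)&RIP_REL_REF(_text);
    unsigned long pa_end  = (unsigned long)&RIP_REL_REF(_end);

    /* kernel virtual addresses, without absolute symbol references */
    unsigned long va_text = pa_text - p2v_offset;
    unsigned long va_end  = pa_end - p2v_offset;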

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lore.kernel.org/r/20241205112804.3416920-12-ardb+git@google.com
@@ -91,9 +91,11 @@ static inline bool check_la57_support(void)
 	return true;
 }
 
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
+static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+						    pmdval_t *pmd,
+						    unsigned long p2v_offset)
 {
-	unsigned long vaddr, vaddr_end;
+	unsigned long paddr, paddr_end;
 	int i;
 
 	/* Encrypt the kernel and related (if SME is active) */
@@ -106,10 +108,10 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 	 * attribute.
 	 */
 	if (sme_get_me_mask()) {
-		vaddr = (unsigned long)__start_bss_decrypted;
-		vaddr_end = (unsigned long)__end_bss_decrypted;
+		paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted);
+		paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted);
 
-		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+		for (; paddr < paddr_end; paddr += PMD_SIZE) {
 			/*
 			 * On SNP, transition the page to shared in the RMP table so that
 			 * it is consistent with the page table attribute change.
@@ -118,11 +120,11 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 			 * mapping (kernel .text). PVALIDATE, by way of
 			 * early_snp_set_memory_shared(), requires a valid virtual
 			 * address but the kernel is currently running off of the identity
-			 * mapping so use __pa() to get a *currently* valid virtual address.
+			 * mapping so use the PA to get a *currently* valid virtual address.
 			 */
-			early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);
+			early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
 
-			i = pmd_index(vaddr);
+			i = pmd_index(paddr - p2v_offset);
 			pmd[i] -= sme_get_me_mask();
 		}
 	}
@@ -146,6 +148,7 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 {
 	pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
 	unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text);
+	unsigned long va_text, va_end;
 	unsigned long pgtable_flags;
 	unsigned long load_delta;
 	pgdval_t *pgd;
@@ -172,6 +175,9 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	if (load_delta & ~PMD_MASK)
 		for (;;);
 
+	va_text = physaddr - p2v_offset;
+	va_end  = (unsigned long)&RIP_REL_REF(_end) - p2v_offset;
+
 	/* Include the SME encryption mask in the fixup value */
 	load_delta += sme_get_me_mask();
@@ -232,7 +238,7 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	pmd_entry += sme_get_me_mask();
 	pmd_entry += physaddr;
 
-	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+	for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
 		int idx = i + (physaddr >> PMD_SHIFT);
 
 		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
@@ -257,11 +263,11 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;
 
 	/* invalidate pages before the kernel image */
-	for (i = 0; i < pmd_index((unsigned long)_text); i++)
+	for (i = 0; i < pmd_index(va_text); i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
 	/* fixup pages that are part of the kernel image */
-	for (; i <= pmd_index((unsigned long)_end); i++)
+	for (; i <= pmd_index(va_end); i++)
 		if (pmd[i] & _PAGE_PRESENT)
 			pmd[i] += load_delta;
@@ -269,7 +275,7 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	for (; i < PTRS_PER_PMD; i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
-	return sme_postprocess_startup(bp, pmd);
+	return sme_postprocess_startup(bp, pmd, p2v_offset);
 }
 
 /* Wipe all early page tables except for the kernel symbol map */
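
For reference, RIP_REL_REF() is what makes taking those addresses safe from
the 1:1 mapping: it forces the symbol's address to be materialized with a
RIP-relative LEA rather than an absolute relocation. A simplified sketch of
the idea (an approximation, not necessarily the exact in-tree definition;
it assumes an x86-64 build with a non-PIC code model such as the kernel's,
so that the "i" constraint accepts a symbol address):

    /*
     * Compute the address of 'var' relative to RIP, so the reference is
     * correct whether the code runs from the 1:1 mapping or from the
     * kernel virtual mapping, and no absolute relocation that would need
     * fixing up is emitted.
     */
    static __always_inline void *rip_rel_ptr(void *p)
    {
    	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
    	return p;
    }

    #define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))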