mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-18 02:46:06 +00:00
a1b87d54f4
The bare metal decompressor code was never really intended to run in a hosted environment such as the EFI boot services, and does a few things that are becoming problematic in the context of EFI boot now that the logo requirements are getting tighter: EFI executables will no longer be allowed to consist of a single executable section that is mapped with read, write and execute permissions if they are intended for use in a context where Secure Boot is enabled (and where Microsoft's set of certificates is used, i.e., every x86 PC built to run Windows).

To avoid stepping on reserved memory before having inspected the E820 tables, and to ensure the correct placement when running a kernel build that is non-relocatable, the bare metal decompressor moves its own executable image to the end of the allocation that was reserved for it, in order to perform the decompression in place. This means the region in question requires both write and execute permissions, which either need to be given upfront (which EFI will no longer permit), or need to be applied on demand using the existing page fault handling framework.

However, the physical placement of the kernel is usually randomized anyway, and even if it isn't, a dedicated decompression output buffer can be allocated anywhere in memory using EFI APIs when still running in the boot services, given that EFI support already implies a relocatable kernel. This means that decompression in place is never necessary, nor is moving the compressed image from one end to the other.

Since EFI already maps all of memory 1:1, it is also unnecessary to create new page tables or handle page faults when decompressing the kernel. That means there is also no need to replace the special exception handlers for SEV.

Generally, there is little need to do any of the things that the decompressor does beyond

- initialize SEV encryption, if needed,
- perform the 4/5 level paging switch, if needed,
- decompress the kernel
- relocate the kernel

So do all of this from the EFI stub code, and avoid the bare metal decompressor altogether.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230807162720.545787-24-ardb@kernel.org
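For orientation, here is a minimal C sketch of the reduced sequence of steps listed above as performed from the EFI stub, using a dedicated, EFI-allocated output buffer instead of decompressing in place. This is not the kernel's actual code; every function name below is a hypothetical placeholder, not a real kernel or EFI API.

/* Minimal sketch only: all helpers are hypothetical placeholders. */
#include <stddef.h>

static void sev_init_if_needed(void)             { /* placeholder: SEV encryption setup */ }
static void switch_paging_levels_if_needed(void) { /* placeholder: 4- vs 5-level paging */ }
static void *efi_alloc_output_buffer(size_t sz)  { return 0; /* placeholder: boot services allocation */ }
static void decompress_kernel_into(void *out)    { /* placeholder */ }
static void relocate_kernel_at(void *out)        { /* placeholder */ }

static void *extract_kernel_from_efi_stub(size_t decompressed_size)
{
	void *out;

	sev_init_if_needed();                   /* 1. SEV encryption, if needed      */
	switch_paging_levels_if_needed();       /* 2. 4/5 level paging switch, if needed */

	/*
	 * The output buffer can be allocated anywhere while boot services are
	 * still available, so the image never has to be moved to the end of
	 * its own allocation and never needs a writable+executable mapping.
	 */
	out = efi_alloc_output_buffer(decompressed_size);

	decompress_kernel_into(out);            /* 3. decompress the kernel */
	relocate_kernel_at(out);                /* 4. relocate the kernel   */
	return out;
}

The actual implementation lives in the EFI stub C code referenced by the commit; the sketch only mirrors the ordering of the four steps above.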
189 lines
4.4 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/boot/head.S
 *
 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
 */

/*
 * head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>

/*
 * These symbols needed to be marked as .hidden to prevent the BFD linker from
 * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when
 * the 32-bit compressed kernel is linked as PIE. This is no longer necessary,
 * but it doesn't hurt to keep them .hidden.
 */
	.hidden _bss
	.hidden _ebss
	.hidden _end

	__HEAD
SYM_FUNC_START(startup_32)
	cld
	cli

/*
 * Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at. This can only be done
 * with a short local call on x86. Nothing else will tell us what
 * address we are running at. The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation. Only 4 bytes are needed.
 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%edx
	addl	$_GLOBAL_OFFSET_TABLE_+(.-1b), %edx
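	/*
	 * %edx now holds the runtime address of the GOT, so the
	 * symbol@GOTOFF(%edx) expressions below resolve to runtime addresses.
	 */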

	/* Load new GDT */
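	/*
	 * The first 6 bytes of gdt (see the .data section below) double as the
	 * GDTR image: a 16-bit limit followed by a 32-bit base that is zero at
	 * build time. Store the GDT's runtime address into that base field
	 * (offset 2) before loading it with lgdt.
	 */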
	leal	gdt@GOTOFF(%edx), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)

	/* Load segment registers with our descriptors */
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss

/*
 * %edx contains the address we are loaded at by the boot loader (plus the
 * offset to the GOT). The below code calculates %ebx to be the address where
 * we should move the kernel image temporarily for safe in-place decompression
 * (again, plus the offset to the GOT).
 *
 * %ebp is calculated to be the address that the kernel will be decompressed to.
 */

#ifdef CONFIG_RELOCATABLE
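	/*
	 * Round the runtime load address up to the boot protocol's
	 * kernel_alignment: %ebx = (%ebx + align - 1) & ~(align - 1).
	 */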
	leal	startup_32@GOTOFF(%edx), %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jae	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:

	movl	%ebx, %ebp	// Save the output address for later
	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$_end@GOTOFF, %ebx
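	/*
	 * %ebx now plays the same role for the relocated copy that %edx plays
	 * for the currently running image: symbol@GOTOFF(%ebx) addresses the
	 * copy, which is placed so that its _end lands exactly at the output
	 * address plus init_size, i.e. at the end of the decompression buffer.
	 */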

	/* Set up the stack */
	leal	boot_stack_end@GOTOFF(%ebx), %esp

	/* Zero EFLAGS */
	pushl	$0
	popfl

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
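	/*
	 * The copy is done a dword at a time, backwards (std), starting from
	 * the last dword below _bss, so it is safe even though the source and
	 * destination ranges may overlap (the destination lies higher in memory).
	 */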
	pushl	%esi
	leal	(_bss@GOTOFF-4)(%edx), %esi
	leal	(_bss@GOTOFF-4)(%ebx), %edi
	movl	$(_bss - startup_32), %ecx
	shrl	$2, %ecx
	std
	rep	movsl
	cld
	popl	%esi

/*
 * The GDT may get overwritten either during the copy we just did or
 * during extract_kernel below. To avoid any issues, repoint the GDTR
 * to the new copy of the GDT.
 */
	leal	gdt@GOTOFF(%ebx), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)

/*
 * Jump to the relocated address.
 */
	leal	.Lrelocated@GOTOFF(%ebx), %eax
	jmp	*%eax
SYM_FUNC_END(startup_32)

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leal	_bss@GOTOFF(%ebx), %edi
	leal	_ebss@GOTOFF(%ebx), %ecx
	subl	%edi, %ecx
	shrl	$2, %ecx
	rep	stosl

/*
 * Do the extraction, and jump to the new kernel..
 */
				/* push arguments for extract_kernel: */

	pushl	%ebp			/* output address */
	pushl	%esi			/* real mode pointer */
	call	extract_kernel		/* returns kernel entry point in %eax */
	addl	$8, %esp		/* discard the two arguments pushed above */

/*
 * Jump to the extracted kernel.
 */
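	/*
	 * %esi still points at boot_params, as the 32-bit boot protocol
	 * requires; clear %ebx before jumping to the entry point returned
	 * in %eax.
	 */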
	xorl	%ebx, %ebx
	jmp	*%eax
SYM_FUNC_END(.Lrelocated)

	.data
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1
	.long	0
	.word	0
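	/*
	 * 0x00cf9a000000ffff and 0x00cf92000000ffff encode flat 4 GiB ring-0
	 * code and data segments: base 0, limit 0xfffff with 4 KiB granularity,
	 * 32-bit, present.
	 */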
	.quad	0x0000000000000000	/* Reserved */
	.quad	0x00cf9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)

/*
 * Stack and heap for uncompression
 */
	.bss
	.balign 4
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end: