mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-17 10:26:09 +00:00
03ddd2f17e
The entry for __ex_table was missing, which may make __ex_table become 1- or 2-byte aligned in modules. Add the entry to ensure it gets 32-bit aligned. As per unaligned-memory-access [0] "unaligned memory accesses [...] will not work correctly on certain platforms and will cause performance problems on others", so fix this. Signed-off-by: Helge Deller <deller@gmx.de> [mcgrof: added unaligned-memory-access justification] Link: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/core-api/unaligned-memory-access.rst # [0] Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
70 lines
1.6 KiB
ArmAsm
/*
 * Common module linker script, always used when linking a module.
 * Archs are free to supply their own linker scripts.  ld will
 * combine them automatically.
 */

/*
 * When the arch provides unwind tables, keep .eh_frame; otherwise it is
 * dead weight in a module and gets discarded below.
 */
#ifdef CONFIG_UNWIND_TABLES
#define DISCARD_EH_FRAME
#else
#define DISCARD_EH_FRAME	*(.eh_frame)
#endif

#include <asm-generic/codetag.lds.h>

SECTIONS {
	/DISCARD/ : {
		*(.discard)
		*(.discard.*)
		*(.export_symbol)
	}

	/* Exported-symbol and CRC tables: sorted so modpost/lookup is fast. */
	__ksymtab		0 : ALIGN(8) { *(SORT(___ksymtab+*)) }
	__ksymtab_gpl		0 : ALIGN(8) { *(SORT(___ksymtab_gpl+*)) }
	__kcrctab		0 : ALIGN(4) { *(SORT(___kcrctab+*)) }
	__kcrctab_gpl		0 : ALIGN(4) { *(SORT(___kcrctab_gpl+*)) }

	.ctors			0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
	.init_array		0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }

	/*
	 * KEEP() so the linker's section garbage collection cannot drop
	 * these metadata tables even though nothing references them.
	 */
	.altinstructions	0 : ALIGN(8) { KEEP(*(.altinstructions)) }
	__bug_table		0 : ALIGN(8) { KEEP(*(__bug_table)) }
	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
	/*
	 * Explicit 32-bit alignment: without this entry __ex_table could end
	 * up 1- or 2-byte aligned in modules, which breaks (or slows down)
	 * platforms that cannot do unaligned accesses.  See
	 * Documentation/core-api/unaligned-memory-access.rst.
	 */
	__ex_table		0 : ALIGN(4) { KEEP(*(__ex_table)) }

	__patchable_function_entries : { *(__patchable_function_entries) }

#ifdef CONFIG_ARCH_USES_CFI_TRAPS
	__kcfi_traps		: { KEEP(*(.kcfi_traps)) }
#endif

#ifdef CONFIG_LTO_CLANG
	/*
	 * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
	 * -ffunction-sections, which increases the size of the final module.
	 * Merge the split sections in the final binary.
	 */
	.bss : {
		*(.bss .bss.[0-9a-zA-Z_]*)
		*(.bss..L*)
	}

	.data : {
		*(.data .data.[0-9a-zA-Z_]*)
		*(.data..L*)
		CODETAG_SECTIONS()
	}

	.rodata : {
		*(.rodata .rodata.[0-9a-zA-Z_]*)
		*(.rodata..L*)
	}
#else
	.data : {
		CODETAG_SECTIONS()
	}
#endif
}

/* bring in arch-specific sections */
#include <asm/module.lds.h>