// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "Modules: " fmt

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

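/*
 * Compute the value a relocation resolves to. In the terms used by the
 * AArch64 ELF specification (and the comments in apply_relocate_add()
 * below), 'val' is (S + A) and 'place' is P, so:
 *
 *   RELOC_OP_ABS:	S + A
 *   RELOC_OP_PREL:	S + A - P
 *   RELOC_OP_PAGE:	Page(S + A) - Page(P), where Page() clears the
 *			low 12 bits
 */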
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
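	/*
	 * For example, under this signed interpretation a R_AARCH64_PREL32
	 * whose computed value is 0x80000000 is rejected with -ERANGE below,
	 * even though the psABI's unsigned upper bound of 2^32 would admit it.
	 */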
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

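/*
 * Patch the 16-bit immediate field of a MOVZ/MOVN/MOVK instruction.
 * For the signed (MOVNZ) relocations we may also have to flip the
 * instruction between MOVZ and MOVN depending on the sign of the
 * computed value; MOVKZ relocations leave the opcode untouched.
 */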
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
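	/*
	 * After the shift above, sval is 0 for an in-range positive value
	 * (upper bits all clear) and -1 for an in-range negative value
	 * (upper bits all set), so the single unsigned comparison below
	 * catches overflow in both directions.
	 */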
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

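/*
 * Cortex-A53 erratum #843419 affects ADRP instructions that end up in
 * the last two instruction slots of a 4K page (4K-relative offsets
 * 0xff8 and 0xffc). Rather than banning ADRP in modules altogether,
 * patch affected instructions at load time: ADRP instructions at an
 * unaffected offset are relocated normally, ADRP instructions within
 * 1MB of their target are converted into ADR, and the remainder are
 * redirected via a veneer that performs the load using an unaffected
 * movn/movk sequence.
 */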
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
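		/* ADRP and ADR encodings differ only in bit 31 */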
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;

		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

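/*
 * Apply the RELA relocations in section 'relsec'. For each entry the
 * resolved value (S + A) is patched into the place P; out-of-range
 * values are diagnosed unless the relocation type cannot meaningfully
 * overflow (the 64-bit, "no check" _NC and top-half G3 variants).
 */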
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
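			/*
			 * A JUMP26/CALL26 branch has a range of +/-128M; if
			 * the target is out of range, route the branch via a
			 * module PLT entry and retry against the PLT address.
			 */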
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

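/*
 * Modules carry a ".text.ftrace_trampoline" section holding PLT
 * entries for the ftrace entry code; initialising these at load time
 * lets patched function entry sites reach FTRACE_ADDR even when the
 * module is loaded out of direct-branch range of the kernel text.
 */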
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

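	/*
	 * With dynamic shadow call stacks, rewrite the module's PAC
	 * prologue/epilogue instructions as shadow call stack pushes and
	 * pops, locating them via the unwind data in .init.eh_frame.
	 */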
	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s)
			__pi_scs_patch((void *)s->sh_addr, s->sh_size);
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}