mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-10 15:58:47 +00:00
selftests/bpf: validate zero preservation for sub-slot loads
Validate that 1-, 2-, and 4-byte loads from stack slots not aligned on 8-byte boundary still preserve zero, when loading from all-STACK_ZERO sub-slots, or when stack sub-slots are covered by spilled register with known constant zero value. Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/r/20231205184248.1502704-8-andrii@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
e322f0bcb8
commit
add1cd7f22
@@ -490,4 +490,75 @@ __naked void spill_subregs_preserve_stack_zero(void)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
char single_byte_buf[1] SEC(".data.single_byte_buf");
|
||||
|
||||
SEC("raw_tp")
|
||||
__log_level(2)
|
||||
__success
|
||||
__naked void partial_stack_load_preserves_zeros(void)
|
||||
{
|
||||
asm volatile (
|
||||
/* fp-8 is all STACK_ZERO */
|
||||
".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */
|
||||
|
||||
/* fp-16 is const zero register */
|
||||
"r0 = 0;"
|
||||
"*(u64 *)(r10 -16) = r0;"
|
||||
|
||||
/* load single U8 from non-aligned STACK_ZERO slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u8 *)(r10 -1);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* load single U8 from non-aligned ZERO REG slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u8 *)(r10 -9);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* load single U16 from non-aligned STACK_ZERO slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u16 *)(r10 -2);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* load single U16 from non-aligned ZERO REG slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u16 *)(r10 -10);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* load single U32 from non-aligned STACK_ZERO slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u32 *)(r10 -4);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* load single U32 from non-aligned ZERO REG slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u32 *)(r10 -12);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* for completeness, load U64 from STACK_ZERO slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u64 *)(r10 -8);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
/* for completeness, load U64 from ZERO REG slot */
|
||||
"r1 = %[single_byte_buf];"
|
||||
"r2 = *(u64 *)(r10 -16);"
|
||||
"r1 += r2;"
|
||||
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
|
||||
|
||||
"r0 = 0;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(single_byte_buf),
|
||||
__imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
|
||||
: __clobber_common);
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
Loading…
x
Reference in New Issue
Block a user