2021-03-22 16:37:52 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
/*
|
|
|
|
* eBPF JIT compiler for PPC32
|
|
|
|
*
|
|
|
|
* Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
|
|
|
|
* CS GROUP France
|
|
|
|
*
|
|
|
|
* Based on PPC64 eBPF JIT compiler by Naveen N. Rao
|
|
|
|
*/
|
|
|
|
#include <linux/moduleloader.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include <asm/asm-compat.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/filter.h>
|
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <asm/kprobes.h>
|
|
|
|
#include <linux/bpf.h>
|
|
|
|
|
|
|
|
#include "bpf_jit.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Stack layout:
|
|
|
|
*
|
|
|
|
* [ prev sp ] <-------------
|
|
|
|
* [ nv gpr save area ] 16 * 4 |
|
|
|
|
* fp (r31) --> [ ebpf stack space ] upto 512 |
|
|
|
|
* [ frame header ] 16 |
|
|
|
|
* sp (r1) ---> [ stack pointer ] --------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* for gpr non volatile registers r17 to r31 (14) + tail call */
|
|
|
|
#define BPF_PPC_STACK_SAVE (15 * 4 + 4)
|
|
|
|
/* stack frame, ensure this is quadword aligned */
|
|
|
|
#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
|
|
|
|
|
2022-02-14 10:41:47 +00:00
|
|
|
/* Materialize the sign extension of a 32-bit immediate: -1 if negative, else 0 */
#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
|
|
|
|
|
2022-02-14 10:41:51 +00:00
|
|
|
/* PPC NVR range -- update this if we ever use NVRs below r17 */
|
|
|
|
#define BPF_PPC_NVR_MIN _R17
|
|
|
|
#define BPF_PPC_TC _R16
|
|
|
|
|
|
|
|
/* BPF register usage */
|
|
|
|
#define TMP_REG (MAX_BPF_JIT_REG + 0)
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/* BPF to ppc register mappings */
|
2022-02-14 10:41:51 +00:00
|
|
|
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
|
|
|
|
{
|
2021-03-22 16:37:52 +00:00
|
|
|
/* function return value */
|
2022-02-14 10:41:51 +00:00
|
|
|
ctx->b2p[BPF_REG_0] = _R12;
|
2021-03-22 16:37:52 +00:00
|
|
|
/* function arguments */
|
2022-02-14 10:41:51 +00:00
|
|
|
ctx->b2p[BPF_REG_1] = _R4;
|
|
|
|
ctx->b2p[BPF_REG_2] = _R6;
|
|
|
|
ctx->b2p[BPF_REG_3] = _R8;
|
|
|
|
ctx->b2p[BPF_REG_4] = _R10;
|
|
|
|
ctx->b2p[BPF_REG_5] = _R22;
|
2021-03-22 16:37:52 +00:00
|
|
|
/* non volatile registers */
|
2022-02-14 10:41:51 +00:00
|
|
|
ctx->b2p[BPF_REG_6] = _R24;
|
|
|
|
ctx->b2p[BPF_REG_7] = _R26;
|
|
|
|
ctx->b2p[BPF_REG_8] = _R28;
|
|
|
|
ctx->b2p[BPF_REG_9] = _R30;
|
2021-03-22 16:37:52 +00:00
|
|
|
/* frame pointer aka BPF_REG_10 */
|
2022-02-14 10:41:51 +00:00
|
|
|
ctx->b2p[BPF_REG_FP] = _R18;
|
2021-03-22 16:37:52 +00:00
|
|
|
/* eBPF jit internal registers */
|
2022-02-14 10:41:51 +00:00
|
|
|
ctx->b2p[BPF_REG_AX] = _R20;
|
|
|
|
ctx->b2p[TMP_REG] = _R31; /* 32 bits */
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
|
|
|
|
{
|
|
|
|
if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
|
|
|
|
return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);
|
|
|
|
|
|
|
|
WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
|
|
|
|
/* Use the hole we have left for alignment */
|
|
|
|
return BPF_PPC_STACKFRAME(ctx) - 4;
|
|
|
|
}
|
|
|
|
|
2022-01-10 12:29:42 +00:00
|
|
|
#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
|
|
|
|
#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
|
|
|
|
#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
|
|
|
|
|
2023-02-01 10:04:25 +00:00
|
|
|
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We only need a stack frame if:
|
|
|
|
* - we call other functions (kernel helpers), or
|
|
|
|
* - we use non volatile registers, or
|
|
|
|
* - we use tail call counter
|
|
|
|
* - the bpf program uses its stack area
|
|
|
|
* The latter condition is deduced from the usage of BPF_REG_FP
|
|
|
|
*/
|
|
|
|
return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
|
|
|
|
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
|
|
|
|
}
|
|
|
|
|
powerpc/bpf: Reallocate BPF registers to volatile registers when possible on PPC32
When the BPF routine doesn't call any function, the non volatile
registers can be reallocated to volatile registers in order to
avoid having to save them/restore on the stack.
Before this patch, the test #359 ADD default X is:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 92 e1 00 2c stw r23,44(r1)
14: 93 01 00 30 stw r24,48(r1)
18: 93 21 00 34 stw r25,52(r1)
1c: 93 41 00 38 stw r26,56(r1)
20: 39 80 00 00 li r12,0
24: 39 60 00 00 li r11,0
28: 3b 40 00 00 li r26,0
2c: 3b 20 00 00 li r25,0
30: 7c 98 23 78 mr r24,r4
34: 7c 77 1b 78 mr r23,r3
38: 39 80 00 42 li r12,66
3c: 39 60 00 00 li r11,0
40: 7d 8c d2 14 add r12,r12,r26
44: 39 60 00 00 li r11,0
48: 7d 83 63 78 mr r3,r12
4c: 82 e1 00 2c lwz r23,44(r1)
50: 83 01 00 30 lwz r24,48(r1)
54: 83 21 00 34 lwz r25,52(r1)
58: 83 41 00 38 lwz r26,56(r1)
5c: 38 21 00 50 addi r1,r1,80
60: 4e 80 00 20 blr
After this patch, the same test has become:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 39 80 00 00 li r12,0
14: 39 60 00 00 li r11,0
18: 39 00 00 00 li r8,0
1c: 38 e0 00 00 li r7,0
20: 7c 86 23 78 mr r6,r4
24: 7c 65 1b 78 mr r5,r3
28: 39 80 00 42 li r12,66
2c: 39 60 00 00 li r11,0
30: 7d 8c 42 14 add r12,r12,r8
34: 39 60 00 00 li r11,0
38: 7d 83 63 78 mr r3,r12
3c: 38 21 00 50 addi r1,r1,80
40: 4e 80 00 20 blr
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b94562d7d2bb21aec89de0c40bb3cd91054b65a2.1616430991.git.christophe.leroy@csgroup.eu
2021-03-22 16:37:53 +00:00
|
|
|
void bpf_jit_realloc_regs(struct codegen_context *ctx)
|
|
|
|
{
|
2022-01-10 12:29:42 +00:00
|
|
|
unsigned int nvreg_mask;
|
|
|
|
|
powerpc/bpf: Reallocate BPF registers to volatile registers when possible on PPC32
When the BPF routine doesn't call any function, the non volatile
registers can be reallocated to volatile registers in order to
avoid having to save them/restore on the stack.
Before this patch, the test #359 ADD default X is:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 92 e1 00 2c stw r23,44(r1)
14: 93 01 00 30 stw r24,48(r1)
18: 93 21 00 34 stw r25,52(r1)
1c: 93 41 00 38 stw r26,56(r1)
20: 39 80 00 00 li r12,0
24: 39 60 00 00 li r11,0
28: 3b 40 00 00 li r26,0
2c: 3b 20 00 00 li r25,0
30: 7c 98 23 78 mr r24,r4
34: 7c 77 1b 78 mr r23,r3
38: 39 80 00 42 li r12,66
3c: 39 60 00 00 li r11,0
40: 7d 8c d2 14 add r12,r12,r26
44: 39 60 00 00 li r11,0
48: 7d 83 63 78 mr r3,r12
4c: 82 e1 00 2c lwz r23,44(r1)
50: 83 01 00 30 lwz r24,48(r1)
54: 83 21 00 34 lwz r25,52(r1)
58: 83 41 00 38 lwz r26,56(r1)
5c: 38 21 00 50 addi r1,r1,80
60: 4e 80 00 20 blr
After this patch, the same test has become:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 39 80 00 00 li r12,0
14: 39 60 00 00 li r11,0
18: 39 00 00 00 li r8,0
1c: 38 e0 00 00 li r7,0
20: 7c 86 23 78 mr r6,r4
24: 7c 65 1b 78 mr r5,r3
28: 39 80 00 42 li r12,66
2c: 39 60 00 00 li r11,0
30: 7d 8c 42 14 add r12,r12,r8
34: 39 60 00 00 li r11,0
38: 7d 83 63 78 mr r3,r12
3c: 38 21 00 50 addi r1,r1,80
40: 4e 80 00 20 blr
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b94562d7d2bb21aec89de0c40bb3cd91054b65a2.1616430991.git.christophe.leroy@csgroup.eu
2021-03-22 16:37:53 +00:00
|
|
|
if (ctx->seen & SEEN_FUNC)
|
2022-01-10 12:29:42 +00:00
|
|
|
nvreg_mask = SEEN_NVREG_TEMP_MASK;
|
|
|
|
else
|
|
|
|
nvreg_mask = SEEN_NVREG_FULL_MASK;
|
powerpc/bpf: Reallocate BPF registers to volatile registers when possible on PPC32
When the BPF routine doesn't call any function, the non volatile
registers can be reallocated to volatile registers in order to
avoid having to save them/restore on the stack.
Before this patch, the test #359 ADD default X is:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 92 e1 00 2c stw r23,44(r1)
14: 93 01 00 30 stw r24,48(r1)
18: 93 21 00 34 stw r25,52(r1)
1c: 93 41 00 38 stw r26,56(r1)
20: 39 80 00 00 li r12,0
24: 39 60 00 00 li r11,0
28: 3b 40 00 00 li r26,0
2c: 3b 20 00 00 li r25,0
30: 7c 98 23 78 mr r24,r4
34: 7c 77 1b 78 mr r23,r3
38: 39 80 00 42 li r12,66
3c: 39 60 00 00 li r11,0
40: 7d 8c d2 14 add r12,r12,r26
44: 39 60 00 00 li r11,0
48: 7d 83 63 78 mr r3,r12
4c: 82 e1 00 2c lwz r23,44(r1)
50: 83 01 00 30 lwz r24,48(r1)
54: 83 21 00 34 lwz r25,52(r1)
58: 83 41 00 38 lwz r26,56(r1)
5c: 38 21 00 50 addi r1,r1,80
60: 4e 80 00 20 blr
After this patch, the same test has become:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 39 80 00 00 li r12,0
14: 39 60 00 00 li r11,0
18: 39 00 00 00 li r8,0
1c: 38 e0 00 00 li r7,0
20: 7c 86 23 78 mr r6,r4
24: 7c 65 1b 78 mr r5,r3
28: 39 80 00 42 li r12,66
2c: 39 60 00 00 li r11,0
30: 7d 8c 42 14 add r12,r12,r8
34: 39 60 00 00 li r11,0
38: 7d 83 63 78 mr r3,r12
3c: 38 21 00 50 addi r1,r1,80
40: 4e 80 00 20 blr
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b94562d7d2bb21aec89de0c40bb3cd91054b65a2.1616430991.git.christophe.leroy@csgroup.eu
2021-03-22 16:37:53 +00:00
|
|
|
|
2022-01-10 12:29:42 +00:00
|
|
|
while (ctx->seen & nvreg_mask &&
|
powerpc/bpf: Reallocate BPF registers to volatile registers when possible on PPC32
When the BPF routine doesn't call any function, the non volatile
registers can be reallocated to volatile registers in order to
avoid having to save them/restore on the stack.
Before this patch, the test #359 ADD default X is:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 92 e1 00 2c stw r23,44(r1)
14: 93 01 00 30 stw r24,48(r1)
18: 93 21 00 34 stw r25,52(r1)
1c: 93 41 00 38 stw r26,56(r1)
20: 39 80 00 00 li r12,0
24: 39 60 00 00 li r11,0
28: 3b 40 00 00 li r26,0
2c: 3b 20 00 00 li r25,0
30: 7c 98 23 78 mr r24,r4
34: 7c 77 1b 78 mr r23,r3
38: 39 80 00 42 li r12,66
3c: 39 60 00 00 li r11,0
40: 7d 8c d2 14 add r12,r12,r26
44: 39 60 00 00 li r11,0
48: 7d 83 63 78 mr r3,r12
4c: 82 e1 00 2c lwz r23,44(r1)
50: 83 01 00 30 lwz r24,48(r1)
54: 83 21 00 34 lwz r25,52(r1)
58: 83 41 00 38 lwz r26,56(r1)
5c: 38 21 00 50 addi r1,r1,80
60: 4e 80 00 20 blr
After this patch, the same test has become:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 39 80 00 00 li r12,0
14: 39 60 00 00 li r11,0
18: 39 00 00 00 li r8,0
1c: 38 e0 00 00 li r7,0
20: 7c 86 23 78 mr r6,r4
24: 7c 65 1b 78 mr r5,r3
28: 39 80 00 42 li r12,66
2c: 39 60 00 00 li r11,0
30: 7d 8c 42 14 add r12,r12,r8
34: 39 60 00 00 li r11,0
38: 7d 83 63 78 mr r3,r12
3c: 38 21 00 50 addi r1,r1,80
40: 4e 80 00 20 blr
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b94562d7d2bb21aec89de0c40bb3cd91054b65a2.1616430991.git.christophe.leroy@csgroup.eu
2021-03-22 16:37:53 +00:00
|
|
|
(ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
|
2022-01-10 12:29:42 +00:00
|
|
|
int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
|
powerpc/bpf: Reallocate BPF registers to volatile registers when possible on PPC32
When the BPF routine doesn't call any function, the non volatile
registers can be reallocated to volatile registers in order to
avoid having to save them/restore on the stack.
Before this patch, the test #359 ADD default X is:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 92 e1 00 2c stw r23,44(r1)
14: 93 01 00 30 stw r24,48(r1)
18: 93 21 00 34 stw r25,52(r1)
1c: 93 41 00 38 stw r26,56(r1)
20: 39 80 00 00 li r12,0
24: 39 60 00 00 li r11,0
28: 3b 40 00 00 li r26,0
2c: 3b 20 00 00 li r25,0
30: 7c 98 23 78 mr r24,r4
34: 7c 77 1b 78 mr r23,r3
38: 39 80 00 42 li r12,66
3c: 39 60 00 00 li r11,0
40: 7d 8c d2 14 add r12,r12,r26
44: 39 60 00 00 li r11,0
48: 7d 83 63 78 mr r3,r12
4c: 82 e1 00 2c lwz r23,44(r1)
50: 83 01 00 30 lwz r24,48(r1)
54: 83 21 00 34 lwz r25,52(r1)
58: 83 41 00 38 lwz r26,56(r1)
5c: 38 21 00 50 addi r1,r1,80
60: 4e 80 00 20 blr
After this patch, the same test has become:
0: 7c 64 1b 78 mr r4,r3
4: 38 60 00 00 li r3,0
8: 94 21 ff b0 stwu r1,-80(r1)
c: 60 00 00 00 nop
10: 39 80 00 00 li r12,0
14: 39 60 00 00 li r11,0
18: 39 00 00 00 li r8,0
1c: 38 e0 00 00 li r7,0
20: 7c 86 23 78 mr r6,r4
24: 7c 65 1b 78 mr r5,r3
28: 39 80 00 42 li r12,66
2c: 39 60 00 00 li r11,0
30: 7d 8c 42 14 add r12,r12,r8
34: 39 60 00 00 li r11,0
38: 7d 83 63 78 mr r3,r12
3c: 38 21 00 50 addi r1,r1,80
40: 4e 80 00 20 blr
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b94562d7d2bb21aec89de0c40bb3cd91054b65a2.1616430991.git.christophe.leroy@csgroup.eu
2021-03-22 16:37:53 +00:00
|
|
|
int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = BPF_REG_0; i <= TMP_REG; i++) {
|
|
|
|
if (ctx->b2p[i] != old)
|
|
|
|
continue;
|
|
|
|
ctx->b2p[i] = new;
|
|
|
|
bpf_set_seen_register(ctx, new);
|
|
|
|
bpf_clear_seen_register(ctx, old);
|
|
|
|
if (i != TMP_REG) {
|
|
|
|
bpf_set_seen_register(ctx, new - 1);
|
|
|
|
bpf_clear_seen_register(ctx, old - 1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
powerpc64/bpf: Add support for bpf trampolines
Add support for bpf_arch_text_poke() and arch_prepare_bpf_trampoline()
for 64-bit powerpc. While the code is generic, BPF trampolines are only
enabled on 64-bit powerpc. 32-bit powerpc will need testing and some
updates.
BPF Trampolines adhere to the existing ftrace ABI utilizing a
two-instruction profiling sequence, as well as the newer ABI utilizing a
three-instruction profiling sequence enabling return with a 'blr'. The
trampoline code itself closely follows x86 implementation.
BPF prog JIT is extended to mimic 64-bit powerpc approach for ftrace
having a single nop at function entry, followed by the function
profiling sequence out-of-line and a separate long branch stub for calls
to trampolines that are out of range. A dummy_tramp is provided to
simplify synchronization similar to arm64.
When attaching a bpf trampoline to a bpf prog, we can patch up to three
things:
- the nop at bpf prog entry to go to the out-of-line stub
- the instruction in the out-of-line stub to either call the bpf trampoline
directly, or to branch to the long_branch stub.
- the trampoline address before the long_branch stub.
We do not need any synchronization here since we always have a valid
branch target regardless of the order in which the above stores are
seen. dummy_tramp ensures that the long_branch stub goes to a valid
destination on other cpus, even when the branch to the long_branch stub
is seen before the updated trampoline address.
However, when detaching a bpf trampoline from a bpf prog, or if changing
the bpf trampoline address, we need synchronization to ensure that other
cpus can no longer branch into the older trampoline so that it can be
safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
make forward progress, but we still need to ensure that other cpus
execute isync (or some CSI) so that they don't go back into the
trampoline again. While here, update the stale comment that describes
the redzone usage in ppc64 BPF JIT.
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/20241030070850.1361304-18-hbathini@linux.ibm.com
2024-10-30 07:08:50 +00:00
|
|
|
/* Instruction for trampoline attach */
|
|
|
|
EMIT(PPC_RAW_NOP());
|
|
|
|
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is an attempt to write above the stack. The problem is encountered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary to maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successful:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
|
2023-02-01 10:04:24 +00:00
|
|
|
if (ctx->seen & SEEN_TAILCALL)
|
|
|
|
EMIT(PPC_RAW_LI(_R4, 0));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_NOP());
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
|
powerpc64/bpf: Add support for bpf trampolines
Add support for bpf_arch_text_poke() and arch_prepare_bpf_trampoline()
for 64-bit powerpc. While the code is generic, BPF trampolines are only
enabled on 64-bit powerpc. 32-bit powerpc will need testing and some
updates.
BPF Trampolines adhere to the existing ftrace ABI utilizing a
two-instruction profiling sequence, as well as the newer ABI utilizing a
three-instruction profiling sequence enabling return with a 'blr'. The
trampoline code itself closely follows x86 implementation.
BPF prog JIT is extended to mimic 64-bit powerpc approach for ftrace
having a single nop at function entry, followed by the function
profiling sequence out-of-line and a separate long branch stub for calls
to trampolines that are out of range. A dummy_tramp is provided to
simplify synchronization similar to arm64.
When attaching a bpf trampoline to a bpf prog, we can patch up to three
things:
- the nop at bpf prog entry to go to the out-of-line stub
- the instruction in the out-of-line stub to either call the bpf trampoline
directly, or to branch to the long_branch stub.
- the trampoline address before the long_branch stub.
We do not need any synchronization here since we always have a valid
branch target regardless of the order in which the above stores are
seen. dummy_tramp ensures that the long_branch stub goes to a valid
destination on other cpus, even when the branch to the long_branch stub
is seen before the updated trampoline address.
However, when detaching a bpf trampoline from a bpf prog, or if changing
the bpf trampoline address, we need synchronization to ensure that other
cpus can no longer branch into the older trampoline so that it can be
safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
make forward progress, but we still need to ensure that other cpus
execute isync (or some CSI) so that they don't go back into the
trampoline again. While here, update the stale comment that describes
the redzone usage in ppc64 BPF JIT.
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/20241030070850.1361304-18-hbathini@linux.ibm.com
2024-10-30 07:08:50 +00:00
|
|
|
#define BPF_TAILCALL_PROLOGUE_SIZE 8
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
|
2023-02-01 10:04:25 +00:00
|
|
|
if (bpf_has_stack_frame(ctx))
|
|
|
|
EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
2021-05-20 10:23:08 +00:00
|
|
|
if (ctx->seen & SEEN_TAILCALL)
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
/* First arg comes in as a 32 bits pointer. */
|
|
|
|
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
|
|
|
|
EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We need a stack frame, but we don't necessarily need to
|
|
|
|
* save/restore LR unless we call other functions
|
|
|
|
*/
|
|
|
|
if (ctx->seen & SEEN_FUNC)
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MFLR(_R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Back up non-volatile regs -- registers r18-r31
|
|
|
|
*/
|
|
|
|
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
|
|
|
|
if (bpf_is_seen_register(ctx, i))
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/* Setup frame pointer to point to the bpf stack area */
|
2022-02-14 10:41:51 +00:00
|
|
|
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
|
|
|
|
EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
|
|
|
|
EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
|
2021-03-22 16:37:52 +00:00
|
|
|
STACK_FRAME_MIN_SIZE + ctx->stack_size));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ctx->seen & SEEN_FUNC)
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Restore NVRs */
|
|
|
|
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
|
|
|
|
if (bpf_is_seen_register(ctx, i))
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
if (ctx->seen & SEEN_FUNC)
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
/* Tear down our stack frame */
|
2023-02-01 10:04:25 +00:00
|
|
|
if (bpf_has_stack_frame(ctx))
|
|
|
|
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
if (ctx->seen & SEEN_FUNC)
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MTLR(_R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
|
|
|
|
{
|
|
|
|
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
|
|
|
|
|
|
|
|
bpf_jit_emit_common_epilogue(image, ctx);
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_BLR());
|
powerpc64/bpf: Add support for bpf trampolines
Add support for bpf_arch_text_poke() and arch_prepare_bpf_trampoline()
for 64-bit powerpc. While the code is generic, BPF trampolines are only
enabled on 64-bit powerpc. 32-bit powerpc will need testing and some
updates.
BPF Trampolines adhere to the existing ftrace ABI utilizing a
two-instruction profiling sequence, as well as the newer ABI utilizing a
three-instruction profiling sequence enabling return with a 'blr'. The
trampoline code itself closely follows x86 implementation.
BPF prog JIT is extended to mimic 64-bit powerpc approach for ftrace
having a single nop at function entry, followed by the function
profiling sequence out-of-line and a separate long branch stub for calls
to trampolines that are out of range. A dummy_tramp is provided to
simplify synchronization similar to arm64.
When attaching a bpf trampoline to a bpf prog, we can patch up to three
things:
- the nop at bpf prog entry to go to the out-of-line stub
- the instruction in the out-of-line stub to either call the bpf trampoline
directly, or to branch to the long_branch stub.
- the trampoline address before the long_branch stub.
We do not need any synchronization here since we always have a valid
branch target regardless of the order in which the above stores are
seen. dummy_tramp ensures that the long_branch stub goes to a valid
destination on other cpus, even when the branch to the long_branch stub
is seen before the updated trampoline address.
However, when detaching a bpf trampoline from a bpf prog, or if changing
the bpf trampoline address, we need synchronization to ensure that other
cpus can no longer branch into the older trampoline so that it can be
safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
make forward progress, but we still need to ensure that other cpus
execute isync (or some CSI) so that they don't go back into the
trampoline again. While here, update the stale comment that describes
the redzone usage in ppc64 BPF JIT.
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/20241030070850.1361304-18-hbathini@linux.ibm.com
2024-10-30 07:08:50 +00:00
|
|
|
|
|
|
|
bpf_jit_build_fentry_stubs(image, ctx);
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
2023-10-20 14:13:58 +00:00
|
|
|
/* Relative offset needs to be calculated based on final image location */
|
|
|
|
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
|
2021-03-22 16:37:52 +00:00
|
|
|
{
|
2023-10-20 14:13:58 +00:00
|
|
|
s32 rel = (s32)func - (s32)(fimage + ctx->idx);
|
2021-04-12 11:44:18 +00:00
|
|
|
|
|
|
|
if (image && rel < 0x2000000 && rel >= -0x2000000) {
|
2023-10-20 14:13:58 +00:00
|
|
|
EMIT(PPC_RAW_BL(rel));
|
2021-04-12 11:44:18 +00:00
|
|
|
} else {
|
|
|
|
/* Load function address into r0 */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
|
|
|
|
EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
|
2021-06-09 09:00:24 +00:00
|
|
|
EMIT(PPC_RAW_MTCTR(_R0));
|
|
|
|
EMIT(PPC_RAW_BCTRL());
|
2021-04-12 11:44:18 +00:00
|
|
|
}
|
2022-02-14 10:41:42 +00:00
|
|
|
|
|
|
|
return 0;
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
2021-10-05 20:25:21 +00:00
|
|
|
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
|
2021-03-22 16:37:52 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* By now, the eBPF program has already setup parameters in r3-r6
|
|
|
|
* r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
|
|
|
|
* r5-r6/BPF_REG_2 - pointer to bpf_array
|
|
|
|
* r7-r8/BPF_REG_3 - index in bpf_array
|
|
|
|
*/
|
2022-02-14 10:41:51 +00:00
|
|
|
int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
|
|
|
|
int b2p_index = bpf_to_ppc(BPF_REG_3);
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if (index >= array->map.max_entries)
|
|
|
|
* goto out;
|
|
|
|
*/
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
|
|
|
|
EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
|
|
|
|
EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
|
2022-02-14 10:41:36 +00:00
|
|
|
PPC_BCC_SHORT(COND_GE, out);
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/*
|
bpf: Change value of MAX_TAIL_CALL_CNT from 32 to 33
In the current code, the actual max tail call count is 33 which is greater
than MAX_TAIL_CALL_CNT (defined as 32). The actual limit is not consistent
with the meaning of MAX_TAIL_CALL_CNT and thus confusing at first glance.
We can see the historical evolution from commit 04fd61ab36ec ("bpf: allow
bpf programs to tail-call other bpf programs") and commit f9dabe016b63
("bpf: Undo off-by-one in interpreter tail call count limit"). In order
to avoid changing existing behavior, the actual limit is 33 now, this is
reasonable.
After commit 874be05f525e ("bpf, tests: Add tail call test suite"), we can
see there exists failed testcase.
On all archs when CONFIG_BPF_JIT_ALWAYS_ON is not set:
# echo 0 > /proc/sys/net/core/bpf_jit_enable
# modprobe test_bpf
# dmesg | grep -w FAIL
Tail call error path, max count reached jited:0 ret 34 != 33 FAIL
On some archs:
# echo 1 > /proc/sys/net/core/bpf_jit_enable
# modprobe test_bpf
# dmesg | grep -w FAIL
Tail call error path, max count reached jited:1 ret 34 != 33 FAIL
Although the above failed testcase has been fixed in commit 18935a72eb25
("bpf/tests: Fix error in tail call limit tests"), it would still be good
to change the value of MAX_TAIL_CALL_CNT from 32 to 33 to make the code
more readable.
The 32-bit x86 JIT was using a limit of 32, just fix the wrong comments and
limit to 33 tail calls as the constant MAX_TAIL_CALL_CNT updated. For the
mips64 JIT, use "ori" instead of "addiu" as suggested by Johan Almbladh.
For the riscv JIT, use RV_REG_TCC directly to save one register move as
suggested by Björn Töpel. For the other implementations, no function changes,
it does not change the current limit 33, the new value of MAX_TAIL_CALL_CNT
can reflect the actual max tail call count, the related tail call testcases
in test_bpf module and selftests can work well for the interpreter and the
JIT.
Here are the test results on x86_64:
# uname -m
x86_64
# echo 0 > /proc/sys/net/core/bpf_jit_enable
# modprobe test_bpf test_suite=test_tail_calls
# dmesg | tail -1
test_bpf: test_tail_calls: Summary: 8 PASSED, 0 FAILED, [0/8 JIT'ed]
# rmmod test_bpf
# echo 1 > /proc/sys/net/core/bpf_jit_enable
# modprobe test_bpf test_suite=test_tail_calls
# dmesg | tail -1
test_bpf: test_tail_calls: Summary: 8 PASSED, 0 FAILED, [8/8 JIT'ed]
# rmmod test_bpf
# ./test_progs -t tailcalls
#142 tailcalls:OK
Summary: 1/11 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Acked-by: Björn Töpel <bjorn@kernel.org>
Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/bpf/1636075800-3264-1-git-send-email-yangtiezhu@loongson.cn
2021-11-05 01:30:00 +00:00
|
|
|
* if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
|
2021-03-22 16:37:52 +00:00
|
|
|
* goto out;
|
|
|
|
*/
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* tail_call_cnt++; */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
|
2022-02-14 10:41:36 +00:00
|
|
|
PPC_BCC_SHORT(COND_GE, out);
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/* prog = array->ptrs[index]; */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
|
|
|
|
EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
|
|
|
|
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if (prog == NULL)
|
|
|
|
* goto out;
|
|
|
|
*/
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_CMPLWI(_R3, 0));
|
2022-02-14 10:41:36 +00:00
|
|
|
PPC_BCC_SHORT(COND_EQ, out);
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
/* goto *(prog->bpf_func + prologue_size); */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
|
|
|
|
EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
|
|
|
|
EMIT(PPC_RAW_MTCTR(_R3));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
2022-02-14 10:41:51 +00:00
|
|
|
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
|
2021-03-22 16:37:52 +00:00
|
|
|
|
powerpc/bpf/32: Fix Oops on tail call tests
test_bpf tail call tests end up as:
test_bpf: #0 Tail call leaf jited:1 85 PASS
test_bpf: #1 Tail call 2 jited:1 111 PASS
test_bpf: #2 Tail call 3 jited:1 145 PASS
test_bpf: #3 Tail call 4 jited:1 170 PASS
test_bpf: #4 Tail call load/store leaf jited:1 190 PASS
test_bpf: #5 Tail call load/store jited:1
BUG: Unable to handle kernel data access on write at 0xf1b4e000
Faulting instruction address: 0xbe86b710
Oops: Kernel access of bad area, sig: 11 [#1]
BE PAGE_SIZE=4K MMU=Hash PowerMac
Modules linked in: test_bpf(+)
CPU: 0 PID: 97 Comm: insmod Not tainted 6.1.0-rc4+ #195
Hardware name: PowerMac3,1 750CL 0x87210 PowerMac
NIP: be86b710 LR: be857e88 CTR: be86b704
REGS: f1b4df20 TRAP: 0300 Not tainted (6.1.0-rc4+)
MSR: 00009032 <EE,ME,IR,DR,RI> CR: 28008242 XER: 00000000
DAR: f1b4e000 DSISR: 42000000
GPR00: 00000001 f1b4dfe0 c11d2280 00000000 00000000 00000000 00000002 00000000
GPR08: f1b4e000 be86b704 f1b4e000 00000000 00000000 100d816a f2440000 fe73baa8
GPR16: f2458000 00000000 c1941ae4 f1fe2248 00000045 c0de0000 f2458030 00000000
GPR24: 000003e8 0000000f f2458000 f1b4dc90 3e584b46 00000000 f24466a0 c1941a00
NIP [be86b710] 0xbe86b710
LR [be857e88] __run_one+0xec/0x264 [test_bpf]
Call Trace:
[f1b4dfe0] [00000002] 0x2 (unreliable)
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace 0000000000000000 ]---
This is a tentative to write above the stack. The problem is encoutered
with tests added by commit 38608ee7b690 ("bpf, tests: Add load store
test case for tail call")
This happens because tail call is done to a BPF prog with a different
stack_depth. At the time being, the stack is kept as is when the caller
tail calls its callee. But at exit, the callee restores the stack based
on its own properties. Therefore here, at each run, r1 is erroneously
increased by 32 - 16 = 16 bytes.
This was done that way in order to pass the tail call count from caller
to callee through the stack. As powerpc32 doesn't have a red zone in
the stack, it was necessary the maintain the stack as is for the tail
call. But it was not anticipated that the BPF frame size could be
different.
Let's take a new approach. Use register r4 to carry the tail call count
during the tail call, and save it into the stack at function entry if
required. This means the input parameter must be in r3, which is more
correct as it is a 32 bits parameter, then tail call better match with
normal BPF function entry, the down side being that we move that input
parameter back and forth between r3 and r4. That can be optimised later.
Doing that also has the advantage of maximising the common parts between
tail calls and a normal function exit.
With the fix, tail call tests are now successfull:
test_bpf: #0 Tail call leaf jited:1 53 PASS
test_bpf: #1 Tail call 2 jited:1 115 PASS
test_bpf: #2 Tail call 3 jited:1 154 PASS
test_bpf: #3 Tail call 4 jited:1 165 PASS
test_bpf: #4 Tail call load/store leaf jited:1 101 PASS
test_bpf: #5 Tail call load/store jited:1 141 PASS
test_bpf: #6 Tail call error path, max count reached jited:1 994 PASS
test_bpf: #7 Tail call count preserved across function calls jited:1 140975 PASS
test_bpf: #8 Tail call error path, NULL target jited:1 110 PASS
test_bpf: #9 Tail call error path, index out of range jited:1 69 PASS
test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
Suggested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fixes: 51c66ad849a7 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/757acccb7fbfc78efa42dcf3c974b46678198905.1669278887.git.christophe.leroy@csgroup.eu
2022-11-24 08:37:27 +00:00
|
|
|
/* Put tail_call_cnt in r4 */
|
|
|
|
EMIT(PPC_RAW_MR(_R4, _R0));
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/* tear restore NVRs, ... */
|
|
|
|
bpf_jit_emit_common_epilogue(image, ctx);
|
|
|
|
|
|
|
|
EMIT(PPC_RAW_BCTR());
|
2021-10-05 20:25:21 +00:00
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/* out: */
|
2021-10-05 20:25:21 +00:00
|
|
|
return 0;
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Assemble the body code between the prologue & epilogue */
|
2023-10-20 14:13:58 +00:00
|
|
|
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
|
2023-02-01 10:04:27 +00:00
|
|
|
u32 *addrs, int pass, bool extra_pass)
|
2021-03-22 16:37:52 +00:00
|
|
|
{
|
|
|
|
const struct bpf_insn *insn = fp->insnsi;
|
|
|
|
int flen = fp->len;
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
/* Start of epilogue code - will only be valid 2nd pass onwards */
|
|
|
|
u32 exit_addr = addrs[flen];
|
|
|
|
|
|
|
|
for (i = 0; i < flen; i++) {
|
|
|
|
u32 code = insn[i].code;
|
powerpc/bpf/32: perform three operands ALU operations
When an ALU instruction is preceded by a MOV instruction
that just moves a source register into the destination register of
the ALU, replace that MOV+ALU instructions by an ALU operation
taking the source of the MOV as second source instead of using its
destination.
Before the change, code could look like the following, with
superfluous separate register move (mr) instructions.
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
With this commit, addition instructions take r30 and r29 directly.
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b6719beaf01f9dcbcdbb787ef67c4a2f8e3a4cb6.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:31 +00:00
|
|
|
u32 prevcode = i ? insn[i - 1].code : 0;
|
2022-02-14 10:41:51 +00:00
|
|
|
u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
|
2021-03-22 16:37:52 +00:00
|
|
|
u32 dst_reg_h = dst_reg - 1;
|
2022-02-14 10:41:51 +00:00
|
|
|
u32 src_reg = bpf_to_ppc(insn[i].src_reg);
|
2021-03-22 16:37:52 +00:00
|
|
|
u32 src_reg_h = src_reg - 1;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
u32 src2_reg = dst_reg;
|
|
|
|
u32 src2_reg_h = dst_reg_h;
|
2022-06-10 15:55:51 +00:00
|
|
|
u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
|
2022-02-14 10:41:51 +00:00
|
|
|
u32 tmp_reg = bpf_to_ppc(TMP_REG);
|
2021-10-12 12:30:51 +00:00
|
|
|
u32 size = BPF_SIZE(code);
|
2022-06-10 15:55:52 +00:00
|
|
|
u32 save_reg, ret_reg;
|
2021-03-22 16:37:52 +00:00
|
|
|
s16 off = insn[i].off;
|
|
|
|
s32 imm = insn[i].imm;
|
|
|
|
bool func_addr_fixed;
|
|
|
|
u64 func_addr;
|
|
|
|
u32 true_cond;
|
2022-01-06 11:45:07 +00:00
|
|
|
u32 tmp_idx;
|
|
|
|
int j;
|
2021-03-22 16:37:52 +00:00
|
|
|
|
powerpc/bpf/32: perform three operands ALU operations
When an ALU instruction is preceded by a MOV instruction
that just moves a source register into the destination register of
the ALU, replace that MOV+ALU instructions by an ALU operation
taking the source of the MOV as second source instead of using its
destination.
Before the change, code could look like the following, with
superfluous separate register move (mr) instructions.
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
With this commit, addition instructions take r30 and r29 directly.
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b6719beaf01f9dcbcdbb787ef67c4a2f8e3a4cb6.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:31 +00:00
|
|
|
if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
|
|
|
|
(BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
|
|
|
|
BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X &&
|
|
|
|
insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) {
|
|
|
|
src2_reg = bpf_to_ppc(insn[i - 1].src_reg);
|
|
|
|
src2_reg_h = src2_reg - 1;
|
|
|
|
ctx->idx = addrs[i - 1] / 4;
|
|
|
|
}
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/*
|
|
|
|
* addrs[] maps a BPF bytecode address into a real offset from
|
|
|
|
* the start of the body code.
|
|
|
|
*/
|
|
|
|
addrs[i] = ctx->idx * 4;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* As an optimization, we note down which registers
|
|
|
|
* are used so that we can only save/restore those in our
|
|
|
|
* prologue and epilogue. We do this here regardless of whether
|
|
|
|
* the actual BPF instruction uses src/dst registers or not
|
|
|
|
* (for instance, BPF_CALL does not use them). The expectation
|
|
|
|
* is that those instructions will have src_reg/dst_reg set to
|
|
|
|
* 0. Even otherwise, we just lose some prologue/epilogue
|
|
|
|
* optimization but everything else should work without
|
|
|
|
* any issues.
|
|
|
|
*/
|
|
|
|
if (dst_reg >= 3 && dst_reg < 32) {
|
|
|
|
bpf_set_seen_register(ctx, dst_reg);
|
|
|
|
bpf_set_seen_register(ctx, dst_reg_h);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src_reg >= 3 && src_reg < 32) {
|
|
|
|
bpf_set_seen_register(ctx, src_reg);
|
|
|
|
bpf_set_seen_register(ctx, src_reg_h);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (code) {
|
|
|
|
/*
|
|
|
|
* Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
|
|
|
|
*/
|
|
|
|
case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
|
|
|
|
imm = -imm;
|
|
|
|
fallthrough;
|
|
|
|
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (!imm) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
} else if (IMM_HA(imm) & 0xffff) {
|
|
|
|
EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm)));
|
|
|
|
src2_reg = dst_reg;
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
if (IMM_L(imm))
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm)));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
|
|
|
|
imm = -imm;
|
|
|
|
fallthrough;
|
|
|
|
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (!imm) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
if (imm >= -32768 && imm < 32768) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
2021-10-05 20:25:29 +00:00
|
|
|
if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
else
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
|
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h));
|
|
|
|
EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg));
|
|
|
|
EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
|
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (imm == 1) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
} else if (imm == -1) {
|
|
|
|
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
|
2023-02-01 10:04:29 +00:00
|
|
|
} else if (is_power_of_2((u32)imm)) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm)));
|
2023-02-01 10:04:29 +00:00
|
|
|
} else if (imm >= -32768 && imm < 32768) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
|
|
|
|
if (!imm) {
|
|
|
|
PPC_LI32(dst_reg, 0);
|
|
|
|
PPC_LI32(dst_reg_h, 0);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
} else if (imm == 1) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
|
|
|
|
} else if (imm == -1) {
|
|
|
|
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
|
|
|
|
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
|
|
|
|
} else if (imm > 0 && is_power_of_2(imm)) {
|
2023-02-01 10:04:29 +00:00
|
|
|
imm = ilog2(imm);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
|
2023-02-01 10:04:29 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
|
|
|
|
} else {
|
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
|
|
|
PPC_LI32(tmp_reg, imm);
|
|
|
|
EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg));
|
|
|
|
if (imm < 0)
|
|
|
|
EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg));
|
|
|
|
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg));
|
|
|
|
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
|
2023-02-01 10:04:29 +00:00
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off)
|
|
|
|
EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off)
|
|
|
|
EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
|
|
|
|
if (!imm)
|
|
|
|
return -EINVAL;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (imm == 1) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
} else if (is_power_of_2((u32)imm)) {
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off)
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
|
2023-02-01 10:04:29 +00:00
|
|
|
} else {
|
|
|
|
PPC_LI32(_R0, imm);
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off)
|
|
|
|
EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
|
2023-02-01 10:04:29 +00:00
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
|
|
|
|
if (!imm)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!is_power_of_2((u32)imm)) {
|
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
|
|
|
PPC_LI32(tmp_reg, imm);
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off)
|
|
|
|
EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
|
|
|
|
} else if (imm == 1) {
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
2024-03-05 15:36:23 +00:00
|
|
|
} else if (off) {
|
|
|
|
EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
|
|
|
|
EMIT(PPC_RAW_ADDZE(_R0, _R0));
|
|
|
|
EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
|
|
|
|
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
} else {
|
|
|
|
imm = ilog2((u32)imm);
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
|
|
|
|
if (!imm)
|
|
|
|
return -EINVAL;
|
|
|
|
if (imm < 0)
|
|
|
|
imm = -imm;
|
|
|
|
if (!is_power_of_2(imm))
|
|
|
|
return -EOPNOTSUPP;
|
2024-03-05 15:36:23 +00:00
|
|
|
if (imm == 1) {
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
2024-03-05 15:36:23 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
} else if (off) {
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
|
|
|
|
EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
|
|
|
|
EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
|
|
|
|
EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
|
|
|
|
EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
|
|
|
|
EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
|
|
|
|
} else {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
|
2024-03-05 15:36:23 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
|
|
|
|
if (!imm)
|
|
|
|
return -EINVAL;
|
|
|
|
if (!is_power_of_2(abs(imm)))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (imm < 0) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
|
|
|
|
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
imm = -imm;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
src2_reg = dst_reg;
|
|
|
|
}
|
|
|
|
if (imm == 1) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
|
|
|
|
} else {
|
|
|
|
imm = ilog2(imm);
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_NEG(dst_reg, src2_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
|
|
|
|
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
|
|
|
|
*/
|
|
|
|
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
|
|
|
|
if (imm >= 0)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
fallthrough;
|
|
|
|
case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
|
|
|
|
if (!IMM_H(imm)) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm)));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else if (!IMM_L(imm)) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm)));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(i) - 1)) - 1))) {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0,
|
2021-03-22 16:37:52 +00:00
|
|
|
32 - fls(imm), 32 - ffs(imm)));
|
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
|
|
|
|
/* Sign-extended */
|
|
|
|
if (imm < 0)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, -1));
|
|
|
|
fallthrough;
|
|
|
|
case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (IMM_L(imm)) {
|
|
|
|
EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm)));
|
|
|
|
src2_reg = dst_reg;
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
if (IMM_H(imm))
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm)));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
|
|
|
|
if (dst_reg == src_reg) {
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
} else {
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
|
|
|
|
if (dst_reg == src_reg)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
|
|
|
else
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
|
|
|
|
if (imm < 0)
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
fallthrough;
|
|
|
|
case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (IMM_L(imm)) {
|
|
|
|
EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm)));
|
|
|
|
src2_reg = dst_reg;
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
if (IMM_H(imm))
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm)));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
|
2021-04-12 11:44:17 +00:00
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0));
|
|
|
|
EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
2021-04-12 11:44:16 +00:00
|
|
|
case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (imm)
|
|
|
|
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
2021-04-12 11:44:16 +00:00
|
|
|
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
|
2021-03-22 16:37:52 +00:00
|
|
|
if (imm < 0)
|
|
|
|
return -EINVAL;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (!imm) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
} else if (imm < 32) {
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31));
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm));
|
|
|
|
} else if (imm < 64) {
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm));
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
|
|
|
} else {
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
|
2021-04-12 11:44:17 +00:00
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (imm)
|
|
|
|
EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
|
|
|
|
if (imm < 0)
|
|
|
|
return -EINVAL;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (!imm) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
|
|
|
|
} else if (imm < 32) {
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31));
|
|
|
|
} else if (imm < 64) {
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31));
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
} else {
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
|
2021-04-12 11:44:17 +00:00
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
|
|
|
|
EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg));
|
|
|
|
EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
|
2021-04-12 11:44:17 +00:00
|
|
|
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (imm)
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
|
|
|
|
if (imm < 0)
|
|
|
|
return -EINVAL;
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
if (!imm) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
|
|
|
|
} else if (imm < 32) {
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
|
|
|
|
} else if (imm < 64) {
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
|
|
|
|
} else {
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* MOV
|
|
|
|
*/
|
|
|
|
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
|
2024-03-05 15:36:23 +00:00
|
|
|
if (off == 8) {
|
|
|
|
EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
|
|
|
|
} else if (off == 16) {
|
|
|
|
EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
|
|
|
|
} else if (off == 32 && dst_reg == src_reg) {
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
|
|
|
|
} else if (off == 32) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
|
|
|
|
} else if (dst_reg != src_reg) {
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src_reg));
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
|
|
|
|
/* special mov32 for zext */
|
|
|
|
if (imm == 1)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
2024-03-05 15:36:23 +00:00
|
|
|
else if (off == 8)
|
|
|
|
EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
|
|
|
|
else if (off == 16)
|
|
|
|
EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
else if (dst_reg != src_reg)
|
|
|
|
EMIT(PPC_RAW_MR(dst_reg, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
|
|
|
|
PPC_LI32(dst_reg, imm);
|
|
|
|
PPC_EX32(dst_reg_h, imm);
|
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
|
|
|
|
PPC_LI32(dst_reg, imm);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BPF_FROM_BE/LE
|
|
|
|
*/
|
|
|
|
case BPF_ALU | BPF_END | BPF_FROM_LE:
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
|
2021-03-22 16:37:52 +00:00
|
|
|
switch (imm) {
|
|
|
|
case 16:
|
|
|
|
/* Copy 16 bits to upper part */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Rotate 8 bits right & mask */
|
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
|
|
|
|
break;
|
|
|
|
case 32:
|
|
|
|
/*
|
|
|
|
* Rotate word left by 8 bits:
|
|
|
|
* 2 bytes are already in their final position
|
|
|
|
* -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
|
|
|
|
*/
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Rotate 24 bits and insert byte 1 */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Rotate 24 bits and insert byte 3 */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MR(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case 64:
|
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31));
|
|
|
|
EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Rotate 24 bits and insert byte 1 */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Rotate 24 bits and insert byte 3 */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23));
|
|
|
|
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23));
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_MR(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
|
|
|
|
break;
|
|
|
|
}
|
2024-03-05 15:36:23 +00:00
|
|
|
if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_ALU | BPF_END | BPF_FROM_BE:
|
|
|
|
switch (imm) {
|
|
|
|
case 16:
|
|
|
|
/* zero-extend 16 bits into 32 bits */
|
powerpc/bpf/32: introduce a second source register for ALU operations
At the time being, all ALU operation are performed with same L-source
and destination, requiring the L-source to be moved into destination via
a separate register move, like:
70: 7f c6 f3 78 mr r6,r30
74: 7f a5 eb 78 mr r5,r29
78: 30 c6 ff f4 addic r6,r6,-12
7c: 7c a5 01 d4 addme r5,r5
Introduce a second source register to all ALU operations. For the time
being that second source register is made equal to the destination
register.
That change will allow, via following patch, to optimise the generated
code as:
70: 30 de ff f4 addic r6,r30,-12
74: 7c bd 01 d4 addme r5,r29
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d5aaaba50d9d6b4a0e9f0cd4a5e34101aca1e247.1675245773.git.christophe.leroy@csgroup.eu
2023-02-01 10:04:30 +00:00
|
|
|
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case 32:
|
|
|
|
case 64:
|
|
|
|
/* nop */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2021-07-13 08:18:31 +00:00
|
|
|
/*
|
|
|
|
* BPF_ST NOSPEC (speculation barrier)
|
|
|
|
*/
|
|
|
|
case BPF_ST | BPF_NOSPEC:
|
|
|
|
break;
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/*
|
|
|
|
* BPF_ST(X)
|
|
|
|
*/
|
|
|
|
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
|
|
|
|
EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_STB(_R0, dst_reg, off));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
|
|
|
|
EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_STH(_R0, dst_reg, off));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
|
|
|
|
EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_STW(_R0, dst_reg, off));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
|
|
|
|
EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
|
|
|
|
EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
|
|
|
|
break;
|
|
|
|
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
|
|
|
|
PPC_EX32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_STW(_R0, dst_reg, off));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
2021-07-01 15:08:59 +00:00
|
|
|
* BPF_STX ATOMIC (atomic ops)
|
2021-03-22 16:37:52 +00:00
|
|
|
*/
|
2021-07-01 15:08:59 +00:00
|
|
|
case BPF_STX | BPF_ATOMIC | BPF_W:
|
2022-06-10 15:55:52 +00:00
|
|
|
save_reg = _R0;
|
|
|
|
ret_reg = src_reg;
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
bpf_set_seen_register(ctx, tmp_reg);
|
2022-06-10 15:55:51 +00:00
|
|
|
bpf_set_seen_register(ctx, ax_reg);
|
|
|
|
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Get offset into TMP_REG */
|
|
|
|
EMIT(PPC_RAW_LI(tmp_reg, off));
|
powerpc/bpf: enforce full ordering for ATOMIC operations with BPF_FETCH
The Linux Kernel Memory Model [1][2] requires RMW operations that have a
return value to be fully ordered.
BPF atomic operations with BPF_FETCH (including BPF_XCHG and
BPF_CMPXCHG) return a value back so they need to be JITed to fully
ordered operations. POWERPC currently emits relaxed operations for
these.
We can show this by running the following litmus-test:
PPC SB+atomic_add+fetch
{
0:r0=x; (* dst reg assuming offset is 0 *)
0:r1=2; (* src reg *)
0:r2=1;
0:r4=y; (* P0 writes to this, P1 reads this *)
0:r5=z; (* P1 writes to this, P0 reads this *)
0:r6=0;
1:r2=1;
1:r4=y;
1:r5=z;
}
P0 | P1 ;
stw r2, 0(r4) | stw r2,0(r5) ;
| ;
loop:lwarx r3, r6, r0 | ;
mr r8, r3 | ;
add r3, r3, r1 | sync ;
stwcx. r3, r6, r0 | ;
bne loop | ;
mr r1, r8 | ;
| ;
lwa r7, 0(r5) | lwa r7,0(r4) ;
~exists(0:r7=0 /\ 1:r7=0)
Witnesses
Positive: 9 Negative: 3
Condition ~exists (0:r7=0 /\ 1:r7=0)
Observation SB+atomic_add+fetch Sometimes 3 9
This test shows that the older store in P0 is reordered with a newer
load to a different address. Although there is a RMW operation with
fetch between them. Adding a sync before and after RMW fixes the issue:
Witnesses
Positive: 9 Negative: 0
Condition ~exists (0:r7=0 /\ 1:r7=0)
Observation SB+atomic_add+fetch Never 0 9
[1] https://www.kernel.org/doc/Documentation/memory-barriers.txt
[2] https://www.kernel.org/doc/Documentation/atomic_t.txt
Fixes: aea7ef8a82c0 ("powerpc/bpf/32: add support for BPF_ATOMIC bitwise operations")
Fixes: 2d9206b22743 ("powerpc/bpf/32: Add instructions for atomic_[cmp]xchg")
Fixes: dbe6e2456fb0 ("powerpc/bpf/64: add support for atomic fetch operations")
Fixes: 1e82dfaa7819 ("powerpc/bpf/64: Add instructions for atomic_[cmp]xchg")
Cc: stable@vger.kernel.org # v6.0+
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Naveen N Rao <naveen@kernel.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240513100248.110535-1-puranjay@kernel.org
2024-05-13 10:02:48 +00:00
|
|
|
/*
|
|
|
|
* Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
|
|
|
|
* before and after the operation.
|
|
|
|
*
|
|
|
|
* This is a requirement in the Linux Kernel Memory Model.
|
|
|
|
* See __cmpxchg_u32() in asm/cmpxchg.h as an example.
|
|
|
|
*/
|
|
|
|
if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
|
|
|
|
EMIT(PPC_RAW_SYNC());
|
2022-06-10 15:55:51 +00:00
|
|
|
tmp_idx = ctx->idx * 4;
|
2021-03-22 16:37:52 +00:00
|
|
|
/* load value from memory into r0 */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
|
2022-06-10 15:55:51 +00:00
|
|
|
|
|
|
|
/* Save old value in BPF_REG_AX */
|
|
|
|
if (imm & BPF_FETCH)
|
|
|
|
EMIT(PPC_RAW_MR(ax_reg, _R0));
|
|
|
|
|
|
|
|
switch (imm) {
|
|
|
|
case BPF_ADD:
|
|
|
|
case BPF_ADD | BPF_FETCH:
|
|
|
|
EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_AND:
|
|
|
|
case BPF_AND | BPF_FETCH:
|
|
|
|
EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_OR:
|
|
|
|
case BPF_OR | BPF_FETCH:
|
|
|
|
EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_XOR:
|
|
|
|
case BPF_XOR | BPF_FETCH:
|
|
|
|
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
|
|
|
|
break;
|
2022-06-10 15:55:52 +00:00
|
|
|
case BPF_CMPXCHG:
|
|
|
|
/*
|
|
|
|
* Return old value in BPF_REG_0 for BPF_CMPXCHG &
|
|
|
|
* in src_reg for other cases.
|
|
|
|
*/
|
|
|
|
ret_reg = bpf_to_ppc(BPF_REG_0);
|
|
|
|
|
|
|
|
/* Compare with old value in BPF_REG_0 */
|
|
|
|
EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
|
|
|
|
/* Don't set if different from old value */
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
|
|
|
|
fallthrough;
|
|
|
|
case BPF_XCHG:
|
|
|
|
save_reg = src_reg;
|
|
|
|
break;
|
2022-06-10 15:55:51 +00:00
|
|
|
default:
|
|
|
|
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
|
|
|
|
code, i);
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* store new value */
|
2022-06-10 15:55:52 +00:00
|
|
|
EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
/* we're done if this succeeded */
|
2022-06-10 15:55:51 +00:00
|
|
|
PPC_BCC_SHORT(COND_NE, tmp_idx);
|
|
|
|
|
|
|
|
/* For the BPF_FETCH variant, get old data into src_reg */
|
|
|
|
if (imm & BPF_FETCH) {
|
powerpc/bpf: enforce full ordering for ATOMIC operations with BPF_FETCH
The Linux Kernel Memory Model [1][2] requires RMW operations that have a
return value to be fully ordered.
BPF atomic operations with BPF_FETCH (including BPF_XCHG and
BPF_CMPXCHG) return a value back so they need to be JITed to fully
ordered operations. POWERPC currently emits relaxed operations for
these.
We can show this by running the following litmus-test:
PPC SB+atomic_add+fetch
{
0:r0=x; (* dst reg assuming offset is 0 *)
0:r1=2; (* src reg *)
0:r2=1;
0:r4=y; (* P0 writes to this, P1 reads this *)
0:r5=z; (* P1 writes to this, P0 reads this *)
0:r6=0;
1:r2=1;
1:r4=y;
1:r5=z;
}
P0 | P1 ;
stw r2, 0(r4) | stw r2,0(r5) ;
| ;
loop:lwarx r3, r6, r0 | ;
mr r8, r3 | ;
add r3, r3, r1 | sync ;
stwcx. r3, r6, r0 | ;
bne loop | ;
mr r1, r8 | ;
| ;
lwa r7, 0(r5) | lwa r7,0(r4) ;
~exists(0:r7=0 /\ 1:r7=0)
Witnesses
Positive: 9 Negative: 3
Condition ~exists (0:r7=0 /\ 1:r7=0)
Observation SB+atomic_add+fetch Sometimes 3 9
This test shows that the older store in P0 is reordered with a newer
load to a different address. Although there is a RMW operation with
fetch between them. Adding a sync before and after RMW fixes the issue:
Witnesses
Positive: 9 Negative: 0
Condition ~exists (0:r7=0 /\ 1:r7=0)
Observation SB+atomic_add+fetch Never 0 9
[1] https://www.kernel.org/doc/Documentation/memory-barriers.txt
[2] https://www.kernel.org/doc/Documentation/atomic_t.txt
Fixes: aea7ef8a82c0 ("powerpc/bpf/32: add support for BPF_ATOMIC bitwise operations")
Fixes: 2d9206b22743 ("powerpc/bpf/32: Add instructions for atomic_[cmp]xchg")
Fixes: dbe6e2456fb0 ("powerpc/bpf/64: add support for atomic fetch operations")
Fixes: 1e82dfaa7819 ("powerpc/bpf/64: Add instructions for atomic_[cmp]xchg")
Cc: stable@vger.kernel.org # v6.0+
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Naveen N Rao <naveen@kernel.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240513100248.110535-1-puranjay@kernel.org
2024-05-13 10:02:48 +00:00
|
|
|
/* Emit 'sync' to enforce full ordering */
|
|
|
|
if (IS_ENABLED(CONFIG_SMP))
|
|
|
|
EMIT(PPC_RAW_SYNC());
|
2022-06-10 15:55:52 +00:00
|
|
|
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
|
2022-06-10 15:55:51 +00:00
|
|
|
if (!fp->aux->verifier_zext)
|
2022-06-10 15:55:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
|
2022-06-10 15:55:51 +00:00
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
2021-07-01 15:08:59 +00:00
|
|
|
case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
|
2021-03-22 16:37:52 +00:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BPF_LDX
|
|
|
|
*/
|
|
|
|
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_MEMSX | BPF_B:
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
|
2021-03-22 16:37:52 +00:00
|
|
|
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_MEMSX | BPF_H:
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
|
2021-03-22 16:37:52 +00:00
|
|
|
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_MEMSX | BPF_W:
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
|
2021-03-22 16:37:52 +00:00
|
|
|
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
|
2021-10-12 12:30:56 +00:00
|
|
|
/*
|
|
|
|
* As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
|
|
|
|
* kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
|
|
|
|
* load only if addr is kernel address (see is_kernel_addr()), otherwise
|
|
|
|
* set dst_reg=0 and move on.
|
|
|
|
*/
|
2024-03-05 15:36:23 +00:00
|
|
|
if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
|
2021-10-12 12:30:56 +00:00
|
|
|
PPC_LI32(_R0, TASK_SIZE - off);
|
|
|
|
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
|
2022-02-14 10:41:36 +00:00
|
|
|
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
|
2021-10-12 12:30:56 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg, 0));
|
|
|
|
/*
|
|
|
|
* For BPF_DW case, "li reg_h,0" would be needed when
|
|
|
|
* !fp->aux->verifier_zext. Emit NOP otherwise.
|
|
|
|
*
|
|
|
|
* Note that "li reg_h,0" is emitted for BPF_B/H/W case,
|
2023-10-13 05:31:18 +00:00
|
|
|
* if necessary. So, jump there instead of emitting an
|
2021-10-12 12:30:56 +00:00
|
|
|
* additional "li reg_h,0" instruction.
|
|
|
|
*/
|
|
|
|
if (size == BPF_DW && !fp->aux->verifier_zext)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
else
|
|
|
|
EMIT(PPC_RAW_NOP());
|
|
|
|
/*
|
|
|
|
* Need to jump two instructions instead of one for BPF_DW case
|
|
|
|
* as there are two load instructions for dst_reg_h & dst_reg
|
|
|
|
* respectively.
|
|
|
|
*/
|
2024-03-05 15:36:23 +00:00
|
|
|
if (size == BPF_DW ||
|
|
|
|
(size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
|
2021-10-12 12:30:56 +00:00
|
|
|
PPC_JMP((ctx->idx + 3) * 4);
|
|
|
|
else
|
|
|
|
PPC_JMP((ctx->idx + 2) * 4);
|
|
|
|
}
|
|
|
|
|
2024-03-05 15:36:23 +00:00
|
|
|
if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
|
|
|
|
switch (size) {
|
|
|
|
case BPF_B:
|
|
|
|
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
|
|
|
|
EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
|
|
|
|
break;
|
|
|
|
case BPF_H:
|
|
|
|
EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_W:
|
|
|
|
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!fp->aux->verifier_zext)
|
|
|
|
EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
|
2021-10-12 12:30:51 +00:00
|
|
|
|
2024-03-05 15:36:23 +00:00
|
|
|
} else {
|
|
|
|
switch (size) {
|
|
|
|
case BPF_B:
|
|
|
|
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_H:
|
|
|
|
EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_W:
|
|
|
|
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
|
|
|
|
break;
|
|
|
|
case BPF_DW:
|
|
|
|
EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
|
|
|
|
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (size != BPF_DW && !fp->aux->verifier_zext)
|
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
}
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
|
|
|
|
if (BPF_MODE(code) == BPF_PROBE_MEM) {
|
|
|
|
int insn_idx = ctx->idx - 1;
|
|
|
|
int jmp_off = 4;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In case of BPF_DW, two lwz instructions are emitted, one
|
|
|
|
* for higher 32-bit and another for lower 32-bit. So, set
|
|
|
|
* ex->insn to the first of the two and jump over both
|
|
|
|
* instructions in fixup.
|
|
|
|
*
|
|
|
|
* Similarly, with !verifier_zext, two instructions are
|
|
|
|
* emitted for BPF_B/H/W case. So, set ex->insn to the
|
|
|
|
* instruction that could fault and skip over both
|
|
|
|
* instructions.
|
|
|
|
*/
|
|
|
|
if (size == BPF_DW || !fp->aux->verifier_zext) {
|
|
|
|
insn_idx -= 1;
|
|
|
|
jmp_off += 4;
|
|
|
|
}
|
|
|
|
|
2023-10-20 14:13:58 +00:00
|
|
|
ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
|
bpf ppc32: Add BPF_PROBE_MEM support for JIT
BPF load instruction with BPF_PROBE_MEM mode can cause a fault
inside kernel. Append exception table for such instructions
within BPF program.
Unlike other archs which uses extable 'fixup' field to pass dest_reg
and nip, BPF exception table on PowerPC follows the generic PowerPC
exception table design, where it populates both fixup and extable
sections within BPF program. fixup section contains 3 instructions,
first 2 instructions clear dest_reg (lower & higher 32-bit registers)
and last instruction jumps to next instruction in the BPF code.
extable 'insn' field contains relative offset of the instruction and
'fixup' field contains relative offset of the fixup entry. Example
layout of BPF program with extable present:
+------------------+
| |
| |
0x4020 -->| lwz r28,4(r4) |
| |
| |
0x40ac -->| lwz r3,0(r24) |
| lwz r4,4(r24) |
| |
| |
|------------------|
0x4278 -->| li r28,0 | \
| li r27,0 | | fixup entry
| b 0x4024 | /
0x4284 -->| li r4,0 |
| li r3,0 |
| b 0x40b4 |
|------------------|
0x4290 -->| insn=0xfffffd90 | \ extable entry
| fixup=0xffffffe4 | /
0x4298 -->| insn=0xfffffe14 |
| fixup=0xffffffe8 |
+------------------+
(Addresses shown here are chosen random, not real)
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211012123056.485795-8-hbathini@linux.ibm.com
2021-10-12 12:30:55 +00:00
|
|
|
jmp_off, dst_reg);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Doubleword load
|
|
|
|
* 16 byte instruction that uses two 'struct bpf_insn'
|
|
|
|
*/
|
|
|
|
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
|
2022-01-06 11:45:07 +00:00
|
|
|
tmp_idx = ctx->idx;
|
2021-03-22 16:37:52 +00:00
|
|
|
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
|
|
|
|
PPC_LI32(dst_reg, (u32)insn[i].imm);
|
2022-01-06 11:45:07 +00:00
|
|
|
/* padding to allow full 4 instructions for later patching */
|
2023-02-01 10:04:28 +00:00
|
|
|
if (!image)
|
|
|
|
for (j = ctx->idx - tmp_idx; j < 4; j++)
|
|
|
|
EMIT(PPC_RAW_NOP());
|
2021-03-22 16:37:52 +00:00
|
|
|
/* Adjust for two bpf instructions */
|
|
|
|
addrs[++i] = ctx->idx * 4;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return/Exit
|
|
|
|
*/
|
|
|
|
case BPF_JMP | BPF_EXIT:
|
|
|
|
/*
|
|
|
|
* If this isn't the very last instruction, branch to
|
|
|
|
* the epilogue. If we _are_ the last instruction,
|
|
|
|
* we'll just fall through to the epilogue.
|
|
|
|
*/
|
2022-02-14 10:41:37 +00:00
|
|
|
if (i != flen - 1) {
|
|
|
|
ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2021-03-22 16:37:52 +00:00
|
|
|
/* else fall through to the epilogue */
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call kernel helper or bpf function
|
|
|
|
*/
|
|
|
|
case BPF_JMP | BPF_CALL:
|
|
|
|
ctx->seen |= SEEN_FUNC;
|
|
|
|
|
2023-02-01 10:04:27 +00:00
|
|
|
ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
|
2021-03-22 16:37:52 +00:00
|
|
|
&func_addr, &func_addr_fixed);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2022-02-14 10:41:51 +00:00
|
|
|
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
|
|
|
|
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
|
|
|
|
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
|
2023-10-20 14:13:58 +00:00
|
|
|
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
|
2022-02-14 10:41:42 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2021-03-22 16:37:52 +00:00
|
|
|
|
2022-02-14 10:41:51 +00:00
|
|
|
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
|
|
|
|
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Jumps and branches
|
|
|
|
*/
|
|
|
|
case BPF_JMP | BPF_JA:
|
|
|
|
PPC_JMP(addrs[i + 1 + off]);
|
|
|
|
break;
|
2024-03-05 15:36:23 +00:00
|
|
|
case BPF_JMP32 | BPF_JA:
|
|
|
|
PPC_JMP(addrs[i + 1 + imm]);
|
|
|
|
break;
|
2021-03-22 16:37:52 +00:00
|
|
|
|
|
|
|
case BPF_JMP | BPF_JGT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JGT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSGT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSGT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JGT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JGT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSGT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSGT | BPF_X:
|
|
|
|
true_cond = COND_GT;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JLT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JLT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSLT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSLT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JLT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JLT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSLT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSLT | BPF_X:
|
|
|
|
true_cond = COND_LT;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JGE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JGE | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSGE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSGE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JGE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JGE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSGE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSGE | BPF_X:
|
|
|
|
true_cond = COND_GE;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JLE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JLE | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSLE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSLE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JLE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JLE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSLE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSLE | BPF_X:
|
|
|
|
true_cond = COND_LE;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JEQ | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JEQ | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JEQ | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JEQ | BPF_X:
|
|
|
|
true_cond = COND_EQ;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JNE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JNE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JNE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JNE | BPF_X:
|
|
|
|
true_cond = COND_NE;
|
|
|
|
goto cond_branch;
|
|
|
|
case BPF_JMP | BPF_JSET | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSET | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSET | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSET | BPF_X:
|
|
|
|
true_cond = COND_NE;
|
|
|
|
/* fallthrough; */
|
|
|
|
|
|
|
|
cond_branch:
|
|
|
|
switch (code) {
|
|
|
|
case BPF_JMP | BPF_JGT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JLT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JGE | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JLE | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JEQ | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JNE | BPF_X:
|
|
|
|
/* unsigned comparison */
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JGT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JLT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JGE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JLE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JEQ | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JNE | BPF_X:
|
|
|
|
/* unsigned comparison */
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_JMP | BPF_JSGT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSLT | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSGE | BPF_X:
|
|
|
|
case BPF_JMP | BPF_JSLE | BPF_X:
|
|
|
|
/* signed comparison */
|
|
|
|
EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JSGT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSLT | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSGE | BPF_X:
|
|
|
|
case BPF_JMP32 | BPF_JSLE | BPF_X:
|
|
|
|
/* signed comparison */
|
|
|
|
EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
|
|
|
|
break;
|
|
|
|
case BPF_JMP | BPF_JSET | BPF_X:
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
|
2021-03-22 16:37:52 +00:00
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JSET | BPF_X: {
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
case BPF_JMP | BPF_JNE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JEQ | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JGT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JLT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JGE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JLE | BPF_K:
|
|
|
|
/*
|
|
|
|
* Need sign-extended load, so only positive
|
|
|
|
* values can be used as imm in cmplwi
|
|
|
|
*/
|
|
|
|
if (imm >= 0 && imm < 32768) {
|
|
|
|
EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
|
|
|
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
|
|
|
|
} else {
|
|
|
|
/* sign-extending load ... but unsigned comparison */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_EX32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
|
|
|
|
PPC_LI32(_R0, imm);
|
2021-03-22 16:37:52 +00:00
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JNE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JEQ | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JGT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JLT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JGE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JLE | BPF_K:
|
|
|
|
if (imm >= 0 && imm < 65536) {
|
|
|
|
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
|
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BPF_JMP | BPF_JSGT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSLT | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSGE | BPF_K:
|
|
|
|
case BPF_JMP | BPF_JSLE | BPF_K:
|
|
|
|
if (imm >= 0 && imm < 65536) {
|
|
|
|
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
|
|
|
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
|
|
|
|
} else {
|
|
|
|
/* sign-extending load */
|
|
|
|
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
2021-03-22 16:37:52 +00:00
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JSGT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSLT | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSGE | BPF_K:
|
|
|
|
case BPF_JMP32 | BPF_JSLE | BPF_K:
|
|
|
|
/*
|
|
|
|
* signed comparison, so any 16-bit value
|
|
|
|
* can be used in cmpwi
|
|
|
|
*/
|
|
|
|
if (imm >= -32768 && imm < 32768) {
|
|
|
|
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
|
|
|
|
} else {
|
|
|
|
/* sign-extending load */
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_CMPW(dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_JMP | BPF_JSET | BPF_K:
|
|
|
|
/* andi does not sign-extend the immediate */
|
|
|
|
if (imm >= 0 && imm < 32768) {
|
|
|
|
/* PPC_ANDI is _only/always_ dot-form */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
2021-03-22 16:37:52 +00:00
|
|
|
if (imm < 0) {
|
|
|
|
EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
|
|
|
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
|
|
|
|
}
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BPF_JMP32 | BPF_JSET | BPF_K:
|
|
|
|
/* andi does not sign-extend the immediate */
|
2021-10-05 20:25:27 +00:00
|
|
|
if (imm >= 0 && imm < 32768) {
|
2021-03-22 16:37:52 +00:00
|
|
|
/* PPC_ANDI is _only/always_ dot-form */
|
2021-05-20 10:23:08 +00:00
|
|
|
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
|
2021-03-22 16:37:52 +00:00
|
|
|
} else {
|
2021-05-20 10:23:08 +00:00
|
|
|
PPC_LI32(_R0, imm);
|
|
|
|
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
|
2021-03-22 16:37:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
PPC_BCC(true_cond, addrs[i + 1 + off]);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tail call
|
|
|
|
*/
|
|
|
|
case BPF_JMP | BPF_TAIL_CALL:
|
|
|
|
ctx->seen |= SEEN_TAILCALL;
|
2021-10-05 20:25:21 +00:00
|
|
|
ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2021-03-22 16:37:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* The filter contains something cruel & unusual.
|
|
|
|
* We don't handle it, but also there shouldn't be
|
|
|
|
* anything missing from our list.
|
|
|
|
*/
|
|
|
|
pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
|
2021-10-05 20:25:28 +00:00
|
|
|
!insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
|
2021-03-22 16:37:52 +00:00
|
|
|
EMIT(PPC_RAW_LI(dst_reg_h, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set end-of-body-code address for exit. */
|
|
|
|
addrs[i] = ctx->idx * 4;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|