bpf: Add necessary migrate_disable to range_tree.
When running bpf selftest (./test_progs -j), the following warnings
showed up:
$ ./test_progs -t arena_atomics
...
BUG: using smp_processor_id() in preemptible [00000000] code: kworker/u19:0/12501
caller is bpf_mem_free+0x128/0x330
...
Call Trace:
<TASK>
dump_stack_lvl
check_preemption_disabled
bpf_mem_free
range_tree_destroy
arena_map_free
bpf_map_free_deferred
process_scheduled_works
...
For the selftests arena_htab and arena_list, similar smp_processor_id() BUGs are
dumped; the following are the two stack traces:
<TASK>
dump_stack_lvl
check_preemption_disabled
bpf_mem_alloc
range_tree_set
arena_map_alloc
map_create
...
<TASK>
dump_stack_lvl
check_preemption_disabled
bpf_mem_alloc
range_tree_clear
arena_vm_fault
do_pte_missing
handle_mm_fault
do_user_addr_fault
...
Add migrate_{disable,enable}() around related bpf_mem_{alloc,free}()
calls to fix the issue.
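
For illustration, the fix pattern looks roughly like the sketch below. This is a minimal sketch under stated assumptions, not the patch itself: range_node_alloc()/range_node_free() are hypothetical helpers, and struct range_node is the node type defined locally in range_tree.c, so the sketch assumes it lives in that file; migrate_disable()/migrate_enable(), bpf_mem_alloc()/bpf_mem_free() and bpf_global_ma are the existing kernel symbols already used there.

/*
 * Sketch of the fix pattern. bpf_mem_alloc()/bpf_mem_free() operate on
 * per-CPU caches and use smp_processor_id() internally, so callers that
 * run in fully preemptible context (map_create(), arena page faults,
 * deferred map free) must disable migration around the call.
 * range_node_alloc()/range_node_free() are illustration-only helpers.
 */
#include <linux/bpf.h>			/* bpf_global_ma */
#include <linux/bpf_mem_alloc.h>	/* bpf_mem_alloc(), bpf_mem_free() */
#include <linux/preempt.h>		/* migrate_disable(), migrate_enable() */

static struct range_node *range_node_alloc(void)
{
	struct range_node *rn;

	migrate_disable();	/* stay on this CPU while touching the per-CPU cache */
	rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
	migrate_enable();

	return rn;		/* NULL on allocation failure */
}

static void range_node_free(struct range_node *rn)
{
	migrate_disable();
	bpf_mem_free(&bpf_global_ma, rn);
	migrate_enable();
}

The patch below open-codes this bracketing at each affected call site in range_tree_clear(), range_tree_set() and range_tree_destroy() rather than introducing helpers.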
Fixes: b795379757 ("bpf: Introduce range_tree data structure and use it in bpf arena")
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20241115060354.2832495-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent ab4dc30c53
commit 4ff04abf9d
@@ -150,7 +150,9 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 			range_it_insert(rn, rt);
 
 			/* Add a range */
+			migrate_disable();
 			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
+			migrate_enable();
 			if (!new_rn)
 				return -ENOMEM;
 			new_rn->rn_start = last + 1;
@@ -170,7 +172,9 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 		} else {
 			/* in the middle of the clearing range */
 			range_it_remove(rn, rt);
+			migrate_disable();
 			bpf_mem_free(&bpf_global_ma, rn);
+			migrate_enable();
 		}
 	}
 	return 0;
@@ -223,7 +227,9 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		range_it_remove(right, rt);
 		left->rn_last = right->rn_last;
 		range_it_insert(left, rt);
+		migrate_disable();
 		bpf_mem_free(&bpf_global_ma, right);
+		migrate_enable();
 	} else if (left) {
 		/* Combine with the left range */
 		range_it_remove(left, rt);
@@ -235,7 +241,9 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		right->rn_start = start;
 		range_it_insert(right, rt);
 	} else {
+		migrate_disable();
 		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
+		migrate_enable();
 		if (!left)
 			return -ENOMEM;
 		left->rn_start = start;
@@ -251,7 +259,9 @@ void range_tree_destroy(struct range_tree *rt)
 
 	while ((rn = range_it_iter_first(rt, 0, -1U))) {
 		range_it_remove(rn, rt);
+		migrate_disable();
 		bpf_mem_free(&bpf_global_ma, rn);
+		migrate_enable();
 	}
 }