mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 02:05:33 +00:00
bpf: Remove migrate_{disable|enable} in ->map_for_each_callback
A BPF program may call bpf_for_each_map_elem(), which in turn invokes the ->map_for_each_callback callback of the related BPF map. Since the running context of a BPF program has already disabled migration, remove the unnecessary migrate_{disable|enable} pair in the implementations of ->map_for_each_callback. To ensure the guarantee will not be violated later, also add a cant_migrate() check in the implementations.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250108010728.207536-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent 1b1a01db17
commit ea5b229630
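For context, a minimal BPF-side sketch of the call path the message describes: a program calls the bpf_for_each_map_elem() helper, which on the kernel side dispatches to the map's ->map_for_each_callback implementation with migration already disabled by the BPF run context. The map, section name, and callback below are illustrative and are not part of this commit.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map;

/* Illustrative percpu array: one u64 counter per slot. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

/* Invoked once per element by ->map_for_each_callback.
 * Return 0 to continue, 1 to stop the iteration.
 */
static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;	/* for a percpu map, val is this CPU's slot */
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	__u64 total = 0;

	/* For an array map this ends up in bpf_for_each_array_elem(). */
	bpf_for_each_map_elem(&counters, sum_cb, &total, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";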
@@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 	u64 ret = 0;
 	void *val;
 
+	cant_migrate();
+
 	if (flags != 0)
 		return -EINVAL;
 
 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	array = container_of(map, struct bpf_array, map);
-	if (is_percpu)
-		migrate_disable();
 	for (i = 0; i < map->max_entries; i++) {
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
@@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 			break;
 	}
 
-	if (is_percpu)
-		migrate_enable();
 	return num_elems;
 }
 
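The array-map hunks above follow a simple pattern: instead of the callee wrapping its this_cpu_ptr() access in a migrate_disable()/migrate_enable() pair, it now only asserts the caller's guarantee with cant_migrate(), a debug check that complains if migration is still possible. An illustrative kernel-style sketch of the two patterns (not code from the tree; the per-CPU variable and function names are made up):

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(u64, demo_slot);	/* illustrative per-CPU counter */

/* Old pattern: the callee protects its own per-CPU access. */
static u64 read_slot_old(void)
{
	u64 v;

	migrate_disable();
	v = *this_cpu_ptr(&demo_slot);
	migrate_enable();
	return v;
}

/* New pattern: the caller (here, the BPF run context) has already disabled
 * migration, so the callee merely asserts that guarantee.
 */
static u64 read_slot_new(void)
{
	cant_migrate();
	return *this_cpu_ptr(&demo_slot);
}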
@@ -2208,17 +2208,18 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 	bool is_percpu;
 	u64 ret = 0;
 
+	cant_migrate();
+
 	if (flags != 0)
 		return -EINVAL;
 
 	is_percpu = htab_is_percpu(htab);
 
 	roundup_key_size = round_up(map->key_size, 8);
-	/* disable migration so percpu value prepared here will be the
-	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
+	/* migration has been disabled, so percpu value prepared here will be
+	 * the same as the one seen by the bpf program with
+	 * bpf_map_lookup_elem().
 	 */
-	if (is_percpu)
-		migrate_disable();
 	for (i = 0; i < htab->n_buckets; i++) {
 		b = &htab->buckets[i];
 		rcu_read_lock();
@@ -2244,8 +2245,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 		rcu_read_unlock();
 	}
 out:
-	if (is_percpu)
-		migrate_enable();
 	return num_elems;
 }
 
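The reworded comment in the hash-map hunk captures why the per-element value is stable: with migration disabled for the whole iteration, the percpu value handed to the callback is the current CPU's slot, the same one the program would observe through bpf_map_lookup_elem(). A hedged BPF-side sketch of that property (map, program, and counter names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map;

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} percpu_vals SEC(".maps");

__u64 mismatches;	/* read back from user space in this sketch */

static long check_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	__u32 k = *key;
	__u64 *cur;

	/* For a percpu hash, *val is this CPU's slot; a lookup from the same
	 * program (same CPU, migration disabled) should see the same value.
	 */
	cur = bpf_map_lookup_elem(&percpu_vals, &k);
	if (cur && *cur != *val)
		mismatches++;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int check_consistency(void *ctx)
{
	bpf_for_each_map_elem(&percpu_vals, check_cb, NULL, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";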