bpf: Remove migrate_{disable|enable} from bpf_task_storage_lock helpers

Three callers of bpf_task_storage_lock() are ->map_lookup_elem,
->map_update_elem, ->map_delete_elem from bpf syscall. BPF syscall for
these three operations of task storage has already disabled migration.
Another two callers are bpf_task_storage_get() and
bpf_task_storage_delete() helpers which will be used by BPF program.

Two callers of bpf_task_storage_trylock() are bpf_task_storage_get() and
bpf_task_storage_delete() helpers. The running contexts of these helpers
have already disabled migration.

Therefore, it is safe to remove migrate_{disable|enable} from the task
storage lock helpers for these call sites. However,
bpf_task_storage_free() also invokes bpf_task_storage_lock(), and its
running context doesn't disable migration; therefore, add the missing
migrate_{disable|enable} pair in bpf_task_storage_free().

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250108010728.207536-6-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Hou Tao 2025-01-08 09:07:17 +08:00 committed by Alexei Starovoitov
parent 25dc65f75b
commit 9e6c958b54

View File

@@ -24,22 +24,20 @@ static DEFINE_PER_CPU(int, bpf_task_storage_busy);
 static void bpf_task_storage_lock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	this_cpu_inc(bpf_task_storage_busy);
 }
 
 static void bpf_task_storage_unlock(void)
 {
 	this_cpu_dec(bpf_task_storage_busy);
-	migrate_enable();
 }
 
 static bool bpf_task_storage_trylock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
 		this_cpu_dec(bpf_task_storage_busy);
-		migrate_enable();
 		return false;
 	}
 	return true;
@@ -72,18 +70,19 @@ void bpf_task_storage_free(struct task_struct *task)
 {
 	struct bpf_local_storage *local_storage;
 
+	migrate_disable();
 	rcu_read_lock();
 
 	local_storage = rcu_dereference(task->bpf_storage);
-	if (!local_storage) {
-		rcu_read_unlock();
-		return;
-	}
+	if (!local_storage)
+		goto out;
 
 	bpf_task_storage_lock();
 	bpf_local_storage_destroy(local_storage);
 	bpf_task_storage_unlock();
+out:
 	rcu_read_unlock();
+	migrate_enable();
 }
 
 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)