bpf: Remove migrate_{disable|enable} from LPM trie
Both the bpf program and the bpf syscall may invoke the ->update or ->delete operations of an LPM trie.

For a bpf program, its running context has already disabled migration, either explicitly through migrate_disable() or implicitly through preempt_disable() or IRQ disabling.

For the bpf syscall, migration is disabled through bpf_disable_instrumentation() before the corresponding map operation callback is invoked.

Therefore, it is safe to remove the migrate_{disable|enable}() pairs from the LPM trie.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250108010728.207536-2-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
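To make the syscall-path argument concrete, below is a minimal sketch of the instrumentation helpers that bracket the map operation callbacks. It is assumed to mirror bpf_disable_instrumentation()/bpf_enable_instrumentation() from include/linux/bpf.h and is not part of this commit:

/* Sketch: the bpf syscall path wraps map callbacks such as trie_update_elem()
 * in these helpers, so migration is already disabled when the trie code runs.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();              /* migration goes off before the callback */
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();               /* ... and back on only after it returns */
}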
parent bfaac2a0b9
commit 1b1a01db17
@@ -289,16 +289,11 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
 }
 
 static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie,
-						 const void *value,
-						 bool disable_migration)
+						 const void *value)
 {
 	struct lpm_trie_node *node;
 
-	if (disable_migration)
-		migrate_disable();
 	node = bpf_mem_cache_alloc(&trie->ma);
-	if (disable_migration)
-		migrate_enable();
 
 	if (!node)
 		return NULL;
@@ -342,10 +337,8 @@ static long trie_update_elem(struct bpf_map *map,
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	/* Allocate and fill a new node. Need to disable migration before
-	 * invoking bpf_mem_cache_alloc().
-	 */
-	new_node = lpm_trie_node_alloc(trie, value, true);
+	/* Allocate and fill a new node */
+	new_node = lpm_trie_node_alloc(trie, value);
 	if (!new_node)
 		return -ENOMEM;
 
@@ -425,8 +418,7 @@ static long trie_update_elem(struct bpf_map *map,
 		goto out;
 	}
 
-	/* migration is disabled within the locked scope */
-	im_node = lpm_trie_node_alloc(trie, NULL, false);
+	im_node = lpm_trie_node_alloc(trie, NULL);
 	if (!im_node) {
 		trie->n_entries--;
 		ret = -ENOMEM;
@@ -452,11 +444,9 @@ static long trie_update_elem(struct bpf_map *map,
 out:
 	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
 
-	migrate_disable();
 	if (ret)
 		bpf_mem_cache_free(&trie->ma, new_node);
 	bpf_mem_cache_free_rcu(&trie->ma, free_node);
-	migrate_enable();
 
 	return ret;
 }
@@ -555,10 +545,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
 out:
 	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
 
-	migrate_disable();
 	bpf_mem_cache_free_rcu(&trie->ma, free_parent);
 	bpf_mem_cache_free_rcu(&trie->ma, free_node);
-	migrate_enable();
 
 	return ret;
 }
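The program-side claim can be illustrated the same way: a BPF program is entered with migration already disabled, either by its run helper or by the caller's preempt/IRQ-disabled context. A hedged sketch, assumed to follow bpf_prog_run_pin_on_cpu() from include/linux/filter.h rather than taken from this commit:

/* Sketch: the program, and any map update/delete it performs through the
 * trie helpers, executes entirely inside a migrate_disable() section.
 */
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
					  const void *ctx)
{
	u32 ret;

	migrate_disable();
	ret = bpf_prog_run(prog, ctx);
	migrate_enable();
	return ret;
}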