mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-06 13:16:22 +00:00
bpf: Refill only one percpu element in memalloc
Typically, for a percpu map element or data structure, once allocated, most operations are lookup or in-place update. Deletions are really rare. Currently, for percpu data structures, 4 elements will be refilled if the size is <= 256. Let us just do one element for percpu data. For example, for size 256 and 128 cpus, the potential saving will be 3 * 256 * 128 * 128 = 12MB. Acked-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Yonghong Song <yonghong.song@linux.dev> Link: https://lore.kernel.org/r/20231222031750.1289290-1-yonghong.song@linux.dev Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
c39aa3b289
commit
5b95e638f1
@@ -485,11 +485,16 @@ static void init_refill_work(struct bpf_mem_cache *c)
 
 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 {
-	/* To avoid consuming memory assume that 1st run of bpf
-	 * prog won't be doing more than 4 map_update_elem from
-	 * irq disabled region
-	 */
-	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
+	int cnt = 1;
+
+	/* To avoid consuming memory, for non-percpu allocation, assume that
+	 * 1st run of bpf prog won't be doing more than 4 map_update_elem from
+	 * irq disabled region if unit size is less than or equal to 256.
+	 * For all other cases, let us just do one allocation.
+	 */
+	if (!c->percpu_size && c->unit_size <= 256)
+		cnt = 4;
+	alloc_bulk(c, cnt, cpu_to_node(cpu), false);
 }
 
 /* When size != 0 bpf_mem_cache for each cpu.
Loading…
Reference in New Issue
Block a user