net: fix sk_memory_allocated_{add|sub} vs softirqs
Jonathan Heathcote reported a regression caused by the blamed
commit on the aarch64 architecture.

x86 happens to have irq-safe __this_cpu_add_return() and
__this_cpu_sub(), but this is not generic.

I think my confusion came from the "struct sock" argument,
because these helpers are called with a locked socket. But the
memory accounting is per-proto (and per-cpu after the blamed
commit). We might clean up these helpers later to directly
accept a "struct proto *proto" argument.

Switch to this_cpu_add_return() and this_cpu_xchg() operations,
and get rid of the preempt_disable()/preempt_enable() pairs.
The fast path becomes a bit faster as a result :)

Many thanks to Jonathan Heathcote for his awesome report and
investigations.

Fixes: 3cd3399dd7a8 ("net: implement per-cpu reserves for memory_allocated")
Reported-by: Jonathan Heathcote <jonathan.heathcote@bbc.co.uk>
Closes: https://lore.kernel.org/netdev/VI1PR01MB42407D7947B2EA448F1E04EFD10D2@VI1PR01MB4240.eurprd01.prod.exchangelabs.com/
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Link: https://lore.kernel.org/r/20240421175248.1692552-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
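Editor's note on why preempt_disable() was not enough: on arm64,
__this_cpu_add_return() expands to a plain load/add/store sequence, and
disabling preemption does not keep a softirq from running on the same CPU
between the load and the store, so the softirq's update to the per-cpu
reserve can be silently overwritten. The sketch below is a userspace
analogy, not kernel code (all names are invented for illustration): a
SIGALRM handler stands in for the softirq and a volatile long for the
per-cpu counter. Lost-update counts will vary from run to run.

/*
 * Userspace analogy of the bug (not kernel code). The main loop does
 * the same load/add/store that __this_cpu_add_return() compiles to on
 * arm64; the SIGALRM handler plays the softirq that interrupts it on
 * the same CPU. Handler updates landing inside the load->store window
 * are lost. Build: cc -O2 demo.c -o demo && ./demo
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile long reserve;      /* plays *per_cpu_fw_alloc          */
static volatile long softirq_adds; /* what the "softirq" contributed   */

static void softirq(int sig)
{
	(void)sig;
	reserve += 100;            /* lost if it lands mid-RMW         */
	softirq_adds += 100;
}

int main(void)
{
	const long iters = 500000000L;
	struct itimerval t = {
		.it_interval = { .tv_usec = 1000 },  /* fire every 1 ms */
		.it_value    = { .tv_usec = 1000 },
	};

	signal(SIGALRM, softirq);
	setitimer(ITIMER_REAL, &t, NULL);

	for (long i = 0; i < iters; i++) {
		long tmp = reserve;  /* load  \ non-irq-safe RMW, like  */
		tmp += 1;            /* add    > __this_cpu_add_return()*/
		reserve = tmp;       /* store / under preempt_disable() */
	}

	t.it_interval.tv_usec = 0;   /* stop the timer */
	t.it_value.tv_usec = 0;
	setitimer(ITIMER_REAL, &t, NULL);

	printf("updates lost: %ld\n", softirq_adds + iters - reserve);
	return 0;
}

this_cpu_add_return() closes this window by using an irq-safe (on many
architectures, single-instruction) operation, which is also why the fix
can drop the preempt_disable()/preempt_enable() pair entirely.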
commit 3584718cf2
parent a44f2eb106
include/net/sock.h
@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
 extern int sysctl_mem_pcpu_rsv;
 
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+	if (val)
+		atomic_long_add(val, proto->memory_allocated);
+}
+
 static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
 {
-	int local_reserve;
+	struct proto *proto = sk->sk_prot;
 
-	preempt_disable();
-	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-	}
-	preempt_enable();
+	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+	if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+		proto_memory_pcpu_drain(proto);
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
 {
-	int local_reserve;
+	struct proto *proto = sk->sk_prot;
 
-	preempt_disable();
-	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-	}
-	preempt_enable();
+	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+	if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+		proto_memory_pcpu_drain(proto);
 }
 
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
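To see the batching scheme at a glance, here is a minimal single-threaded
userspace model of the post-patch logic (an analogy with invented names,
not kernel code): per-CPU state becomes a plain variable, the shared
proto->memory_allocated becomes a C11 atomic, and the this_cpu_*()
operations become ordinary arithmetic. That simplification is only
legitimate here because nothing can interrupt the model mid-update; the
kernel needs this_cpu_xchg() and friends precisely because a softirq can.

/*
 * Minimal model of per-cpu reserve batching (userspace analogy,
 * invented names). Build: cc -std=c11 model.c -o model && ./model
 */
#include <stdatomic.h>
#include <stdio.h>

#define MEM_PCPU_RSV (1 << 8)        /* plays sysctl_mem_pcpu_rsv      */

static atomic_long memory_allocated; /* plays proto->memory_allocated  */
static long fw_alloc;                /* plays *proto->per_cpu_fw_alloc */

/* Flush the local batch into the shared counter,
 * cf. proto_memory_pcpu_drain(). */
static void memory_pcpu_drain(void)
{
	long val = fw_alloc;         /* kernel: this_cpu_xchg(.., 0)   */
	fw_alloc = 0;
	if (val)
		atomic_fetch_add(&memory_allocated, val);
}

/* cf. sk_memory_allocated_add(): batch until the reserve threshold. */
static void memory_allocated_add(long val)
{
	fw_alloc += val;             /* kernel: this_cpu_add_return()  */
	if (fw_alloc >= MEM_PCPU_RSV)
		memory_pcpu_drain();
}

/* cf. sk_memory_allocated_sub(): symmetric negative threshold. */
static void memory_allocated_sub(long val)
{
	fw_alloc -= val;             /* kernel: this_cpu_sub_return()  */
	if (fw_alloc <= -MEM_PCPU_RSV)
		memory_pcpu_drain();
}

int main(void)
{
	/* 10000 updates touch the shared atomic only ~39 times. */
	for (int i = 0; i < 10000; i++)
		memory_allocated_add(1);
	for (int i = 0; i < 10000; i++)
		memory_allocated_sub(1);
	memory_pcpu_drain();
	printf("global = %ld\n", atomic_load(&memory_allocated)); /* 0 */
	return 0;
}

The symmetric +/- thresholds exist because a per-CPU reserve can drift in
either direction; draining on both bounds keeps the global counter within
roughly nr_cpus * sysctl_mem_pcpu_rsv of the true total while keeping the
fast path free of atomic operations on shared cache lines.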