2020-03-27 14:48:50 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
|
2024-05-13 18:13:32 -07:00
|
|
|
#include <net/inet_common.h>
|
|
|
|
|
2020-03-27 14:48:50 -07:00
|
|
|
/* Per-netns MPTCP MIB counter indices; one slot per tracked MPTCP-level
 * event.  These index the mibs[] array in struct mptcp_mib and are
 * manipulated through the MPTCP_*_STATS() helpers below.
 */
enum linux_mptcp_mib_field {
	MPTCP_MIB_NUM = 0,
	MPTCP_MIB_MPCAPABLEPASSIVE,	/* Received SYN with MP_CAPABLE */
	MPTCP_MIB_MPCAPABLEACTIVE,	/* Sent SYN with MP_CAPABLE */
	MPTCP_MIB_MPCAPABLEACTIVEACK,	/* Received SYN/ACK with MP_CAPABLE */
	MPTCP_MIB_MPCAPABLEPASSIVEACK,	/* Received third ACK with MP_CAPABLE */
	MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
	MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
	MPTCP_MIB_MPCAPABLEACTIVEDROP,	/* Client-side fallback due to a MPC drop */
	MPTCP_MIB_MPCAPABLEACTIVEDISABLED, /* Client-side disabled due to past issues */
	MPTCP_MIB_MPCAPABLEENDPATTEMPT,	/* Prohibited MPC to port-based endp */
	MPTCP_MIB_TOKENFALLBACKINIT,	/* Could not init/allocate token */
	MPTCP_MIB_RETRANSSEGS,		/* Segments retransmitted at the MPTCP-level */
	MPTCP_MIB_JOINNOTOKEN,		/* Received MP_JOIN but the token was not found */
	MPTCP_MIB_JOINSYNRX,		/* Received a SYN + MP_JOIN */
	MPTCP_MIB_JOINSYNBACKUPRX,	/* Received a SYN + MP_JOIN + backup flag */
	MPTCP_MIB_JOINSYNACKRX,		/* Received a SYN/ACK + MP_JOIN */
	MPTCP_MIB_JOINSYNACKBACKUPRX,	/* Received a SYN/ACK + MP_JOIN + backup flag */
	MPTCP_MIB_JOINSYNACKMAC,	/* HMAC was wrong on SYN/ACK + MP_JOIN */
	MPTCP_MIB_JOINACKRX,		/* Received an ACK + MP_JOIN */
	MPTCP_MIB_JOINACKMAC,		/* HMAC was wrong on ACK + MP_JOIN */
	MPTCP_MIB_JOINSYNTX,		/* Sending a SYN + MP_JOIN */
	MPTCP_MIB_JOINSYNTXCREATSKERR,	/* Not able to create a socket when sending a SYN + MP_JOIN */
	MPTCP_MIB_JOINSYNTXBINDERR,	/* Not able to bind() the address when sending a SYN + MP_JOIN */
	MPTCP_MIB_JOINSYNTXCONNECTERR,	/* Not able to connect() when sending a SYN + MP_JOIN */
	MPTCP_MIB_DSSNOMATCH,		/* Received a new mapping that did not match the previous one */
	MPTCP_MIB_DSSCORRUPTIONFALLBACK,/* DSS corruption detected, fallback */
	MPTCP_MIB_DSSCORRUPTIONRESET,	/* DSS corruption detected, MPJ subflow reset */
	MPTCP_MIB_INFINITEMAPTX,	/* Sent an infinite mapping */
	MPTCP_MIB_INFINITEMAPRX,	/* Received an infinite mapping */
	MPTCP_MIB_DSSTCPMISMATCH,	/* DSS-mapping did not map with TCP's sequence numbers */
	MPTCP_MIB_DATACSUMERR,		/* The data checksum failed */
	MPTCP_MIB_OFOQUEUETAIL,	/* Segments inserted into OoO queue tail */
	MPTCP_MIB_OFOQUEUE,		/* Segments inserted into OoO queue */
	MPTCP_MIB_OFOMERGE,		/* Segments merged in OoO queue */
	MPTCP_MIB_NODSSWINDOW,		/* Segments not in MPTCP windows */
	MPTCP_MIB_DUPDATA,		/* Segments discarded due to duplicate DSS */
	MPTCP_MIB_ADDADDR,		/* Received ADD_ADDR with echo-flag=0 */
	MPTCP_MIB_ADDADDRTX,		/* Sent ADD_ADDR with echo-flag=0 */
	MPTCP_MIB_ADDADDRTXDROP,	/* ADD_ADDR with echo-flag=0 not sent due to
					 * resource exhaustion
					 */
	MPTCP_MIB_ECHOADD,		/* Received ADD_ADDR with echo-flag=1 */
	MPTCP_MIB_ECHOADDTX,		/* Sent ADD_ADDR with echo-flag=1 */
	MPTCP_MIB_ECHOADDTXDROP,	/* ADD_ADDR with echo-flag=1 not sent due
					 * to resource exhaustion
					 */
	MPTCP_MIB_PORTADD,		/* Received ADD_ADDR with a port-number */
	MPTCP_MIB_ADDADDRDROP,		/* Dropped incoming ADD_ADDR */
	MPTCP_MIB_JOINPORTSYNRX,	/* Received a SYN MP_JOIN with a different port-number */
	MPTCP_MIB_JOINPORTSYNACKRX,	/* Received a SYNACK MP_JOIN with a different port-number */
	MPTCP_MIB_JOINPORTACKRX,	/* Received an ACK MP_JOIN with a different port-number */
	MPTCP_MIB_MISMATCHPORTSYNRX,	/* Received a SYN MP_JOIN with a mismatched port-number */
	MPTCP_MIB_MISMATCHPORTACKRX,	/* Received an ACK MP_JOIN with a mismatched port-number */
	MPTCP_MIB_RMADDR,		/* Received RM_ADDR */
	MPTCP_MIB_RMADDRDROP,		/* Dropped incoming RM_ADDR */
	MPTCP_MIB_RMADDRTX,		/* Sent RM_ADDR */
	MPTCP_MIB_RMADDRTXDROP,		/* RM_ADDR not sent due to resource exhaustion */
	MPTCP_MIB_RMSUBFLOW,		/* Remove a subflow */
	MPTCP_MIB_MPPRIOTX,		/* Transmit a MP_PRIO */
	MPTCP_MIB_MPPRIORX,		/* Received a MP_PRIO */
	MPTCP_MIB_MPFAILTX,		/* Transmit a MP_FAIL */
	MPTCP_MIB_MPFAILRX,		/* Received a MP_FAIL */
	MPTCP_MIB_MPFASTCLOSETX,	/* Transmit a MP_FASTCLOSE */
	MPTCP_MIB_MPFASTCLOSERX,	/* Received a MP_FASTCLOSE */
	MPTCP_MIB_MPRSTTX,		/* Transmit a MP_RST */
	MPTCP_MIB_MPRSTRX,		/* Received a MP_RST */
	MPTCP_MIB_RCVPRUNED,		/* Incoming packet dropped due to memory limit */
	MPTCP_MIB_SUBFLOWSTALE,		/* Subflows entered 'stale' status */
	MPTCP_MIB_SUBFLOWRECOVER,	/* Subflows returned to active status after being stale */
	MPTCP_MIB_SNDWNDSHARED,		/* Subflow snd wnd is overridden by msk's one */
	MPTCP_MIB_RCVWNDSHARED,		/* Subflow rcv wnd is overridden by msk's one */
	MPTCP_MIB_RCVWNDCONFLICTUPDATE,	/* subflow rcv wnd is overridden by msk's one due to
					 * conflict with another subflow while updating msk rcv wnd
					 */
	MPTCP_MIB_RCVWNDCONFLICT,	/* Conflict while updating msk rcv wnd */
	MPTCP_MIB_CURRESTAB,		/* Current established MPTCP connections */
	MPTCP_MIB_BLACKHOLE,		/* A blackhole has been detected */
	__MPTCP_MIB_MAX			/* Must be last: number of counters above */
};
|
|
|
|
|
|
|
|
/* Number of MPTCP MIB counters, derived from the enum above. */
#define LINUX_MIB_MPTCP_MAX	__MPTCP_MIB_MAX

/* Per-netns MPTCP counter storage, indexed by enum linux_mptcp_mib_field. */
struct mptcp_mib {
	unsigned long	mibs[LINUX_MIB_MPTCP_MAX];
};
|
|
|
|
|
2023-05-17 12:16:16 -07:00
|
|
|
static inline void MPTCP_ADD_STATS(struct net *net,
|
|
|
|
enum linux_mptcp_mib_field field,
|
|
|
|
int val)
|
|
|
|
{
|
|
|
|
if (likely(net->mib.mptcp_statistics))
|
|
|
|
SNMP_ADD_STATS(net->mib.mptcp_statistics, field, val);
|
|
|
|
}
|
|
|
|
|
2020-03-27 14:48:50 -07:00
|
|
|
static inline void MPTCP_INC_STATS(struct net *net,
|
|
|
|
enum linux_mptcp_mib_field field)
|
|
|
|
{
|
|
|
|
if (likely(net->mib.mptcp_statistics))
|
|
|
|
SNMP_INC_STATS(net->mib.mptcp_statistics, field);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void __MPTCP_INC_STATS(struct net *net,
|
|
|
|
enum linux_mptcp_mib_field field)
|
|
|
|
{
|
|
|
|
if (likely(net->mib.mptcp_statistics))
|
|
|
|
__SNMP_INC_STATS(net->mib.mptcp_statistics, field);
|
|
|
|
}
|
|
|
|
|
2023-12-22 13:47:22 +01:00
|
|
|
static inline void MPTCP_DEC_STATS(struct net *net,
|
|
|
|
enum linux_mptcp_mib_field field)
|
|
|
|
{
|
|
|
|
if (likely(net->mib.mptcp_statistics))
|
|
|
|
SNMP_DEC_STATS(net->mib.mptcp_statistics, field);
|
|
|
|
}
|
|
|
|
|
2020-03-27 14:48:50 -07:00
|
|
|
/* Allocate net->mib.mptcp_statistics for @net; defined out of line.
 * NOTE(review): presumably returns true on success -- confirm in the
 * defining .c file.
 */
bool mptcp_mib_alloc(struct net *net);
|