linux-next/include/linux/skb_array.h
Eric Dumazet a126061c80 ptr_ring: do not block hard interrupts in ptr_ring_resize_multiple()
Jakub added a lockdep_assert_no_hardirq() check in __page_pool_put_page()
to increase test coverage.

syzbot found a splat caused by hard irq blocking in
ptr_ring_resize_multiple() [1]

As current users of ptr_ring_resize_multiple() do not require
hard irqs to be masked, change it to only block BH.

Rename the helpers to better reflect that they are safe against BH only:

- ptr_ring_resize_multiple() to ptr_ring_resize_multiple_bh()
- skb_array_resize_multiple() to skb_array_resize_multiple_bh()
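
The effect on the resize path is essentially one lock swap: the queue
exchange is now guarded by spin_lock_bh() instead of spin_lock_irqsave().
Roughly (a simplified sketch of the resulting ptr_ring code; queue
allocation and error handling omitted):

	for (i = 0; i < nrings; ++i) {
		spin_lock_bh(&rings[i]->consumer_lock);
		spin_lock(&rings[i]->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&rings[i]->producer_lock);
		spin_unlock_bh(&rings[i]->consumer_lock);
	}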

[1]

WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 __page_pool_put_page net/core/page_pool.c:709 [inline]
WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
Modules linked in:
CPU: 1 UID: 0 PID: 9150 Comm: syz.1.1052 Not tainted 6.11.0-rc3-syzkaller-00202-gf8669d7b5f5d #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/06/2024
RIP: 0010:__page_pool_put_page net/core/page_pool.c:709 [inline]
RIP: 0010:page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
Code: 74 0e e8 7c aa fb f7 eb 43 e8 75 aa fb f7 eb 3c 65 8b 1d 38 a8 6a 76 31 ff 89 de e8 a3 ae fb f7 85 db 74 0b e8 5a aa fb f7 90 <0f> 0b 90 eb 1d 65 8b 1d 15 a8 6a 76 31 ff 89 de e8 84 ae fb f7 85
RSP: 0018:ffffc9000bda6b58 EFLAGS: 00010083
RAX: ffffffff8997e523 RBX: 0000000000000000 RCX: 0000000000040000
RDX: ffffc9000fbd0000 RSI: 0000000000001842 RDI: 0000000000001843
RBP: 0000000000000000 R08: ffffffff8997df2c R09: 1ffffd40003a000d
R10: dffffc0000000000 R11: fffff940003a000e R12: ffffea0001d00040
R13: ffff88802e8a4000 R14: dffffc0000000000 R15: 00000000ffffffff
FS:  00007fb7aaf716c0(0000) GS:ffff8880b9300000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fa15a0d4b72 CR3: 00000000561b0000 CR4: 00000000003506f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 <TASK>
 tun_ptr_free drivers/net/tun.c:617 [inline]
 __ptr_ring_swap_queue include/linux/ptr_ring.h:571 [inline]
 ptr_ring_resize_multiple_noprof include/linux/ptr_ring.h:643 [inline]
 tun_queue_resize drivers/net/tun.c:3694 [inline]
 tun_device_event+0xaaf/0x1080 drivers/net/tun.c:3714
 notifier_call_chain+0x19f/0x3e0 kernel/notifier.c:93
 call_netdevice_notifiers_extack net/core/dev.c:2032 [inline]
 call_netdevice_notifiers net/core/dev.c:2046 [inline]
 dev_change_tx_queue_len+0x158/0x2a0 net/core/dev.c:9024
 do_setlink+0xff6/0x41f0 net/core/rtnetlink.c:2923
 rtnl_setlink+0x40d/0x5a0 net/core/rtnetlink.c:3201
 rtnetlink_rcv_msg+0x73f/0xcf0 net/core/rtnetlink.c:6647
 netlink_rcv_skb+0x1e3/0x430 net/netlink/af_netlink.c:2550

Fixes: ff4e538c8c ("page_pool: add a lockdep check for recycling in hardirq")
Reported-by: syzbot+f56a5c5eac2b28439810@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/netdev/671e10df.050a0220.2b8c0f.01cf.GAE@google.com/T/
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://patch.msgid.link/20241217135121.326370-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-12-18 17:55:30 -08:00


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */
#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};
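
/*
 * A minimal usage sketch (illustrative only; error handling elided).
 * The queue size is fixed when the ring is initialized:
 *
 *	struct skb_array a;
 *
 *	skb_array_init(&a, 256, GFP_KERNEL);
 *	if (skb_array_produce(&a, skb))
 *		kfree_skb(skb);		produce failed: the ring is full
 *					and the caller still owns the skb
 *	skb = skb_array_consume(&a);	NULL when the ring is empty
 *	skb_array_cleanup(&a);		kfree_skb()s anything still queued
 */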

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
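
/*
 * For instance, a busy-wait producer (hypothetical caller, shown only to
 * illustrate the barrier requirement above; cpu_relax() implies a compiler
 * barrier, so the ring state is re-read on every iteration):
 *
 *	while (__skb_array_full(&a))
 *		cpu_relax();
 *	skb_array_produce(&a, skb);
 */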

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}
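
/*
 * The batched variants dequeue up to n skbs in one call and return the
 * number actually taken, amortizing the consumer lock. Illustrative drain
 * loop (process() is a hypothetical callback):
 *
 *	struct sk_buff *batch[64];
 *	int i, n;
 *
 *	n = skb_array_consume_batched(&a, batch, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		process(batch[i]);
 */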

static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;
		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}
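
/*
 * The peek_len helpers above run __skb_array_len_with_tag() on the head
 * skb while holding the consumer lock, so the skb cannot be consumed and
 * freed underneath the length computation; the NULL branch makes them
 * return 0 for an empty ring.
 */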

static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init_noprof(&a->ring, size, gfp);
}
#define skb_array_init(...)	alloc_hooks(skb_array_init_noprof(__VA_ARGS__))

static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}
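
/*
 * Unconsume pushes previously consumed skbs back onto the ring, freeing
 * any that no longer fit. Illustrative rollback of a partially delivered
 * batch (deliver() is a hypothetical helper):
 *
 *	n = skb_array_consume_batched(&a, batch, 16);
 *	used = deliver(batch, n);
 *	if (used < n)
 *		skb_array_unconsume(&a, batch + used, n - used);
 */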

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
						      int nrings,
						      unsigned int size,
						      gfp_t gfp)
{
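	/*
	 * The cast to ptr_ring below is only valid because 'ring' is the
	 * first (and only) member of struct skb_array; the BUILD_BUG_ON
	 * enforces that its offset stays zero.
	 */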
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
						  nrings, size, gfp,
						  __skb_array_destroy_skb);
}
#define skb_array_resize_multiple_bh(...) \
	alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
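
/*
 * Illustrative multi-queue resize, e.g. when a device's tx queue length
 * changes (hypothetical driver structures; tun performs the equivalent
 * directly on struct ptr_ring, as in the splat above):
 *
 *	struct skb_array *rings[MAX_QUEUES];
 *
 *	for (i = 0; i < nqueues; i++)
 *		rings[i] = &queues[i].skbs;
 *	skb_array_resize_multiple_bh(rings, nqueues, new_len, GFP_KERNEL);
 */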

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */