Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
commit 1c1efc2af1
Create and free the buffer pool independently from the umem. Move the operations that are performed on the buffer pool from the umem create and destroy functions to new create and destroy functions just for the buffer pool. This is so that in later commits we can instantiate multiple buffer pools per umem when sharing a umem between HW queues and/or devices. We also eradicate the back pointer from the umem to the buffer pool, as this will not work once we introduce the possibility of having multiple buffer pools per umem.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-4-git-send-email-magnus.karlsson@intel.com
22 lines · 682 B · C
/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef XDP_UMEM_H_
#define XDP_UMEM_H_

#include <net/xdp_sock_drv.h>

void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			 u16 queue_id);
void xdp_umem_clear_dev(struct xdp_umem *umem);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);

#endif /* XDP_UMEM_H_ */