mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-06 05:02:31 +00:00
7361f9c3d7
Move the fill and completion rings from the umem to the buffer pool. This is so that, in a later commit, we can share the umem between multiple HW queue ids. In this case, we need one fill and completion ring per queue id. As the buffer pool is per queue id and napi id, this is a natural place for them, and one umem structure can be shared between these buffer pools. Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Björn Töpel <bjorn.topel@intel.com> Link: https://lore.kernel.org/bpf/1598603189-32145-5-git-send-email-magnus.karlsson@intel.com
271 lines
5.5 KiB
C
271 lines
5.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* XDP user-space packet buffer
|
|
* Copyright(c) 2018 Intel Corporation.
|
|
*/
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/sched/mm.h>
|
|
#include <linux/sched/signal.h>
|
|
#include <linux/sched/task.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/bpf.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/rtnetlink.h>
|
|
#include <linux/idr.h>
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include "xdp_umem.h"
|
|
#include "xsk_queue.h"
|
|
|
|
#define XDP_UMEM_MIN_CHUNK_SIZE 2048
|
|
|
|
static DEFINE_IDA(umem_ida);
|
|
|
|
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
|
|
{
|
|
unsigned long flags;
|
|
|
|
if (!xs->tx)
|
|
return;
|
|
|
|
spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
|
|
list_add_rcu(&xs->list, &umem->xsk_tx_list);
|
|
spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
|
|
}
|
|
|
|
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
|
|
{
|
|
unsigned long flags;
|
|
|
|
if (!xs->tx)
|
|
return;
|
|
|
|
spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
|
|
list_del_rcu(&xs->list);
|
|
spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
|
|
}
|
|
|
|
/* Release the pinned user pages backing the umem.
 *
 * Pages are unpinned with make_dirty == true, then the page pointer
 * array is freed and umem->pgs reset so the teardown is idempotent
 * with respect to the pointer (kfree(NULL) is a no-op elsewhere).
 */
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}
|
|
|
|
/* Return the umem's page count to the owning user's locked_vm
 * accounting and drop the uid reference taken in
 * xdp_umem_account_pages().
 *
 * umem->user is NULL when accounting was skipped (CAP_IPC_LOCK) or
 * failed, in which case this is a no-op.
 */
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}
|
|
|
|
void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
|
u16 queue_id)
|
|
{
|
|
umem->dev = dev;
|
|
umem->queue_id = queue_id;
|
|
|
|
dev_hold(dev);
|
|
}
|
|
|
|
/* Detach the umem from its netdev: drop the device reference taken in
 * xdp_umem_assign_dev() and clear the zero-copy flag.
 */
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}
|
|
|
|
/* Final teardown of a umem, reached when the last reference is
 * dropped (see xdp_put_umem()): detach from the device, return the id
 * to the ida, unpin and unaccount the user pages, then free the umem
 * itself.
 */
static void xdp_umem_release(struct xdp_umem *umem)
{
	xdp_umem_clear_dev(umem);

	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}
|
|
|
|
/* Take a reference on the umem. Paired with xdp_put_umem(). */
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}
|
|
|
|
/* Drop a reference on the umem; NULL is tolerated as a no-op. The
 * last reference triggers xdp_umem_release().
 */
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users))
		xdp_umem_release(umem);
}
|
|
|
|
/* Long-term pin the umem->npgs user pages starting at @address,
 * storing the page pointers in umem->pgs.
 *
 * On a partial pin (fewer pages returned than requested), umem->npgs
 * is trimmed to the actual count so that xdp_umem_unpin_pages()
 * releases exactly what was pinned, and -ENOMEM is returned.
 *
 * Returns 0 on success or a negative errno; on any failure umem->pgs
 * ends up freed and NULL.
 */
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			/* Partial pin: release only the pages we got. */
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		/* pin_user_pages() returned a negative errno. */
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	/* On the out_pin path, unpin_pages already freed and NULLed
	 * umem->pgs, so this kfree(NULL) is a harmless no-op.
	 */
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}
|
|
|
|
/* Charge the umem's pages against the caller's RLIMIT_MEMLOCK.
 *
 * Skipped entirely for CAP_IPC_LOCK holders (umem->user stays NULL).
 * Otherwise the user's locked_vm counter is raised by umem->npgs with
 * a cmpxchg retry loop, so that concurrent updaters can never push it
 * past the limit.
 *
 * Returns 0 on success, or -ENOBUFS if the limit would be exceeded
 * (the uid reference is dropped and umem->user reset to NULL).
 */
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}
|
|
|
|
/* Validate the registration request in @mr and initialize @umem from
 * it: flag, size, alignment and chunk-geometry checks, then rlimit
 * accounting and pinning of the user pages.
 *
 * Returns 0 on success or a negative errno; on failure nothing
 * remains accounted or pinned.
 */
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_per_page;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	/* Reject unknown flag bits. */
	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			  XDP_UMEM_USES_NEED_WAKEUP))
		return -EINVAL;

	/* Aligned mode requires a power-of-2 chunk size. */
	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	/* Reject address-range wraparound. */
	if ((addr + size) < addr)
		return -EINVAL;

	npgs = size >> PAGE_SHIFT;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks) {
		/* Aligned mode: chunks must tile whole pages exactly. */
		chunks_per_page = PAGE_SIZE / chunk_size;
		if (chunks < chunks_per_page || chunks % chunks_per_page)
			return -EINVAL;
	}

	/* Headroom must leave room for XDP_PACKET_HEADROOM plus data. */
	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_tx_list);
	spin_lock_init(&umem->xsk_tx_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}
|
|
|
|
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
|
|
{
|
|
struct xdp_umem *umem;
|
|
int err;
|
|
|
|
umem = kzalloc(sizeof(*umem), GFP_KERNEL);
|
|
if (!umem)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
|
|
if (err < 0) {
|
|
kfree(umem);
|
|
return ERR_PTR(err);
|
|
}
|
|
umem->id = err;
|
|
|
|
err = xdp_umem_reg(umem, mr);
|
|
if (err) {
|
|
ida_simple_remove(&umem_ida, umem->id);
|
|
kfree(umem);
|
|
return ERR_PTR(err);
|
|
}
|
|
|
|
return umem;
|
|
}
|